if_bge.c (241343) if_bge.c (241388)
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 241343 2012-10-08 07:33:43Z yongari $");
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 241388 2012-10-10 01:24:02Z yongari $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
 136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} const bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 },
219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 },
220 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
221 { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 },
222 { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 },
223 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
224 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
225 { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 },
226 { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 },
227
228 { SK_VENDORID, SK_DEVICEID_ALTIMA },
229
230 { TC_VENDORID, TC_DEVICEID_3C996 },
231
232 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
233 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
234 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
235
236 { 0, 0 }
237};
238
239static const struct bge_vendor {
240 uint16_t v_id;
241 const char *v_name;
242} const bge_vendors[] = {
243 { ALTEON_VENDORID, "Alteon" },
244 { ALTIMA_VENDORID, "Altima" },
245 { APPLE_VENDORID, "Apple" },
246 { BCOM_VENDORID, "Broadcom" },
247 { SK_VENDORID, "SysKonnect" },
248 { TC_VENDORID, "3Com" },
249 { FJTSU_VENDORID, "Fujitsu" },
250
251 { 0, NULL }
252};
253
254static const struct bge_revision {
255 uint32_t br_chipid;
256 const char *br_name;
257} const bge_revisions[] = {
258 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
259 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
260 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
261 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
262 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
263 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
264 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
265 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
266 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
267 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
268 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
269 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
270 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
271 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
272 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
273 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
274 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
275 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
276 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
277 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
278 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
279 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
280 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
281 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
282 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
283 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
284 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
285 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
286 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
287 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
288 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
289 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
290 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
291 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
292 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
293 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
294 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
295 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
296 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
297 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
298 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
299 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
300 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
301 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
302 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
303 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
304 { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
305 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
306 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
307 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
308 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
309 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
310 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
311 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
312 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
313 /* 5754 and 5787 share the same ASIC ID */
314 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
315 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
316 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
317 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
318 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
319 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
320 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
321 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
322 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
323
324 { 0, NULL }
325};
326
327/*
328 * Some defaults for major revisions, so that newer steppings
329 * that we don't know about have a shot at working.
330 */
331static const struct bge_revision const bge_majorrevs[] = {
332 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
333 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
334 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
335 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
336 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
337 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
338 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
339 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
340 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
341 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
342 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
343 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
344 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
345 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
346 /* 5754 and 5787 share the same ASIC ID */
347 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
348 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
349 { BGE_ASICREV_BCM57765, "unknown BCM57765" },
350 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
351 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
352 { BGE_ASICREV_BCM5719, "unknown BCM5719" },
353 { BGE_ASICREV_BCM5720, "unknown BCM5720" },
354
355 { 0, NULL }
356};
357
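/* Chip-class convenience tests based on the feature flags set up at attach time. */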
358#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
359#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
360#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
361#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
362#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
363#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
364#define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
365
366const struct bge_revision * bge_lookup_rev(uint32_t);
367const struct bge_vendor * bge_lookup_vendor(uint16_t);
368
369typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
370
371static int bge_probe(device_t);
372static int bge_attach(device_t);
373static int bge_detach(device_t);
374static int bge_suspend(device_t);
375static int bge_resume(device_t);
376static void bge_release_resources(struct bge_softc *);
377static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
378static int bge_dma_alloc(struct bge_softc *);
379static void bge_dma_free(struct bge_softc *);
380static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
381 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
382
383static void bge_devinfo(struct bge_softc *);
384static int bge_mbox_reorder(struct bge_softc *);
385
386static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
387static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
388static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
389static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
390static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
391
392static void bge_txeof(struct bge_softc *, uint16_t);
393static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
394static int bge_rxeof(struct bge_softc *, uint16_t, int);
395
396static void bge_asf_driver_up (struct bge_softc *);
397static void bge_tick(void *);
398static void bge_stats_clear_regs(struct bge_softc *);
399static void bge_stats_update(struct bge_softc *);
400static void bge_stats_update_regs(struct bge_softc *);
401static struct mbuf *bge_check_short_dma(struct mbuf *);
402static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
403 uint16_t *, uint16_t *);
404static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
405
406static void bge_intr(void *);
407static int bge_msi_intr(void *);
408static void bge_intr_task(void *, int);
409static void bge_start_locked(struct ifnet *);
410static void bge_start(struct ifnet *);
411static int bge_ioctl(struct ifnet *, u_long, caddr_t);
412static void bge_init_locked(struct bge_softc *);
413static void bge_init(void *);
414static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
415static void bge_stop(struct bge_softc *);
416static void bge_watchdog(struct bge_softc *);
417static int bge_shutdown(device_t);
418static int bge_ifmedia_upd_locked(struct ifnet *);
419static int bge_ifmedia_upd(struct ifnet *);
420static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
421
422static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
423static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
424
425static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
426static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
427
428static void bge_setpromisc(struct bge_softc *);
429static void bge_setmulti(struct bge_softc *);
430static void bge_setvlan(struct bge_softc *);
431
432static __inline void bge_rxreuse_std(struct bge_softc *, int);
433static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
434static int bge_newbuf_std(struct bge_softc *, int);
435static int bge_newbuf_jumbo(struct bge_softc *, int);
436static int bge_init_rx_ring_std(struct bge_softc *);
437static void bge_free_rx_ring_std(struct bge_softc *);
438static int bge_init_rx_ring_jumbo(struct bge_softc *);
439static void bge_free_rx_ring_jumbo(struct bge_softc *);
440static void bge_free_tx_ring(struct bge_softc *);
441static int bge_init_tx_ring(struct bge_softc *);
442
443static int bge_chipinit(struct bge_softc *);
444static int bge_blockinit(struct bge_softc *);
445static uint32_t bge_dma_swap_options(struct bge_softc *);
446
447static int bge_has_eaddr(struct bge_softc *);
448static uint32_t bge_readmem_ind(struct bge_softc *, int);
449static void bge_writemem_ind(struct bge_softc *, int, int);
450static void bge_writembx(struct bge_softc *, int, int);
451#ifdef notdef
452static uint32_t bge_readreg_ind(struct bge_softc *, int);
453#endif
454static void bge_writemem_direct(struct bge_softc *, int, int);
455static void bge_writereg_ind(struct bge_softc *, int, int);
456
457static int bge_miibus_readreg(device_t, int, int);
458static int bge_miibus_writereg(device_t, int, int, int);
459static void bge_miibus_statchg(device_t);
460#ifdef DEVICE_POLLING
461static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
462#endif
463
464#define BGE_RESET_START 1
465#define BGE_RESET_STOP 2
466static void bge_sig_post_reset(struct bge_softc *, int);
467static void bge_sig_legacy(struct bge_softc *, int);
468static void bge_sig_pre_reset(struct bge_softc *, int);
469static void bge_stop_fw(struct bge_softc *);
470static int bge_reset(struct bge_softc *);
471static void bge_link_upd(struct bge_softc *);
472
473/*
474 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
475 * leak information to untrusted users. It is also known to cause alignment
476 * traps on certain architectures.
477 */
478#ifdef BGE_REGISTER_DEBUG
479static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
480static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
481static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
482#endif
483static void bge_add_sysctls(struct bge_softc *);
484static void bge_add_sysctl_stats_regs(struct bge_softc *,
485 struct sysctl_ctx_list *, struct sysctl_oid_list *);
486static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
487 struct sysctl_oid_list *);
488static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
489
490static device_method_t bge_methods[] = {
491 /* Device interface */
492 DEVMETHOD(device_probe, bge_probe),
493 DEVMETHOD(device_attach, bge_attach),
494 DEVMETHOD(device_detach, bge_detach),
495 DEVMETHOD(device_shutdown, bge_shutdown),
496 DEVMETHOD(device_suspend, bge_suspend),
497 DEVMETHOD(device_resume, bge_resume),
498
499 /* MII interface */
500 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
501 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
502 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
503
504 DEVMETHOD_END
505};
506
507static driver_t bge_driver = {
508 "bge",
509 bge_methods,
510 sizeof(struct bge_softc)
511};
512
513static devclass_t bge_devclass;
514
515DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
516DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
517
518static int bge_allow_asf = 1;
519
520TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
521
522static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
523SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
524 "Allow ASF mode if available");
525
526#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
527#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
528#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
529#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
530#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
531
532static int
533bge_has_eaddr(struct bge_softc *sc)
534{
535#ifdef __sparc64__
536 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
537 device_t dev;
538 uint32_t subvendor;
539
540 dev = sc->bge_dev;
541
542 /*
543 * The on-board BGEs found in sun4u machines aren't fitted with
544 * an EEPROM which means that we have to obtain the MAC address
545 * via OFW and that some tests will always fail. We distinguish
546 * such BGEs by the subvendor ID, which also has to be obtained
547 * from OFW instead of the PCI configuration space as the latter
548 * indicates Broadcom as the subvendor of the netboot interface.
549 * For early Blade 1500 and 2500 we even have to check the OFW
550 * device path as the subvendor ID always defaults to Broadcom
551 * there.
552 */
553 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
554 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
555 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
556 return (0);
557 memset(buf, 0, sizeof(buf));
558 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
559 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
560 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
561 return (0);
562 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
563 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
564 return (0);
565 }
566#endif
567 return (1);
568}
569
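/*
 * Read a 32-bit word from NIC-internal memory through the PCI memory
 * window registers.  Accesses to the statistics/send-ring range are
 * not supported on the BCM5906 and simply return 0.
 */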
570static uint32_t
571bge_readmem_ind(struct bge_softc *sc, int off)
572{
573 device_t dev;
574 uint32_t val;
575
576 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
577 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
578 return (0);
579
580 dev = sc->bge_dev;
581
582 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
583 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
584 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
585 return (val);
586}
587
588static void
589bge_writemem_ind(struct bge_softc *sc, int off, int val)
590{
591 device_t dev;
592
593 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
594 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
595 return;
596
597 dev = sc->bge_dev;
598
599 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
600 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
601 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
602}
603
604#ifdef notdef
605static uint32_t
606bge_readreg_ind(struct bge_softc *sc, int off)
607{
608 device_t dev;
609
610 dev = sc->bge_dev;
611
612 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
613 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
614}
615#endif
616
617static void
618bge_writereg_ind(struct bge_softc *sc, int off, int val)
619{
620 device_t dev;
621
622 dev = sc->bge_dev;
623
624 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
625 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
626}
627
628static void
629bge_writemem_direct(struct bge_softc *sc, int off, int val)
630{
631 CSR_WRITE_4(sc, off, val);
632}
633
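/*
 * Write a value to a mailbox register.  The BCM5906 exposes its
 * mailboxes through the low-priority alias, so the offset is
 * translated first; on chips flagged with mailbox write reordering
 * the register is read back to flush the write.
 */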
634static void
635bge_writembx(struct bge_softc *sc, int off, int val)
636{
637 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
638 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
639
640 CSR_WRITE_4(sc, off, val);
641 if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0)
642 CSR_READ_4(sc, off);
643}
644
645/*
646 * Map a single buffer address.
647 */
648
649static void
650bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
651{
652 struct bge_dmamap_arg *ctx;
653
654 if (error)
655 return;
656
657 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
658
659 ctx = arg;
660 ctx->bge_busaddr = segs->ds_addr;
661}
662
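/*
 * Read a single byte from NVRAM (BCM5906 only): acquire the software
 * arbitration lock, enable NVRAM access, issue a word read and pick
 * the requested byte out of the byte-swapped result.
 */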
663static uint8_t
664bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
665{
666 uint32_t access, byte = 0;
667 int i;
668
669 /* Lock. */
670 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
671 for (i = 0; i < 8000; i++) {
672 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
673 break;
674 DELAY(20);
675 }
676 if (i == 8000)
677 return (1);
678
679 /* Enable access. */
680 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
681 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
682
683 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
684 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
685 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
686 DELAY(10);
687 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
688 DELAY(10);
689 break;
690 }
691 }
692
693 if (i == BGE_TIMEOUT * 10) {
694 if_printf(sc->bge_ifp, "nvram read timed out\n");
695 return (1);
696 }
697
698 /* Get result. */
699 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
700
701 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
702
703 /* Disable access. */
704 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
705
706 /* Unlock. */
707 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
708 CSR_READ_4(sc, BGE_NVRAM_SWARB);
709
710 return (0);
711}
712
713/*
714 * Read a sequence of bytes from NVRAM.
715 */
716static int
717bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
718{
719 int err = 0, i;
720 uint8_t byte = 0;
721
722 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
723 return (1);
724
725 for (i = 0; i < cnt; i++) {
726 err = bge_nvram_getbyte(sc, off + i, &byte);
727 if (err)
728 break;
729 *(dest + i) = byte;
730 }
731
732 return (err ? 1 : 0);
733}
734
735/*
736 * Read a byte of data stored in the EEPROM at address 'addr.' The
737 * BCM570x supports both the traditional bitbang interface and an
738 * auto access interface for reading the EEPROM. We use the auto
739 * access method.
740 */
741static uint8_t
742bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
743{
744 int i;
745 uint32_t byte = 0;
746
747 /*
748 * Enable use of auto EEPROM access so we can avoid
749 * having to use the bitbang method.
750 */
751 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
752
753 /* Reset the EEPROM, load the clock period. */
754 CSR_WRITE_4(sc, BGE_EE_ADDR,
755 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
756 DELAY(20);
757
758 /* Issue the read EEPROM command. */
759 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
760
761 /* Wait for completion */
762 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
763 DELAY(10);
764 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
765 break;
766 }
767
768 if (i == BGE_TIMEOUT * 10) {
769 device_printf(sc->bge_dev, "EEPROM read timed out\n");
770 return (1);
771 }
772
773 /* Get result. */
774 byte = CSR_READ_4(sc, BGE_EE_DATA);
775
776 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
777
778 return (0);
779}
780
781/*
782 * Read a sequence of bytes from the EEPROM.
783 */
784static int
785bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
786{
787 int i, error = 0;
788 uint8_t byte = 0;
789
790 for (i = 0; i < cnt; i++) {
791 error = bge_eeprom_getbyte(sc, off + i, &byte);
792 if (error)
793 break;
794 *(dest + i) = byte;
795 }
796
797 return (error ? 1 : 0);
798}
799
800static int
801bge_miibus_readreg(device_t dev, int phy, int reg)
802{
803 struct bge_softc *sc;
804 uint32_t val;
805 int i;
806
807 sc = device_get_softc(dev);
808
809 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
810 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
811 CSR_WRITE_4(sc, BGE_MI_MODE,
812 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
813 DELAY(80);
814 }
815
816 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
817 BGE_MIPHY(phy) | BGE_MIREG(reg));
818
819 /* Poll for the PHY register access to complete. */
820 for (i = 0; i < BGE_TIMEOUT; i++) {
821 DELAY(10);
822 val = CSR_READ_4(sc, BGE_MI_COMM);
823 if ((val & BGE_MICOMM_BUSY) == 0) {
824 DELAY(5);
825 val = CSR_READ_4(sc, BGE_MI_COMM);
826 break;
827 }
828 }
829
830 if (i == BGE_TIMEOUT) {
831 device_printf(sc->bge_dev,
832 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
833 phy, reg, val);
834 val = 0;
835 }
836
837 /* Restore the autopoll bit if necessary. */
838 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
839 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
840 DELAY(80);
841 }
842
843 if (val & BGE_MICOMM_READFAIL)
844 return (0);
845
846 return (val & 0xFFFF);
847}
848
849static int
850bge_miibus_writereg(device_t dev, int phy, int reg, int val)
851{
852 struct bge_softc *sc;
853 int i;
854
855 sc = device_get_softc(dev);
856
857 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
858 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
859 return (0);
860
861 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
862 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
863 CSR_WRITE_4(sc, BGE_MI_MODE,
864 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
865 DELAY(80);
866 }
867
868 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
869 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
870
871 for (i = 0; i < BGE_TIMEOUT; i++) {
872 DELAY(10);
873 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
874 DELAY(5);
875 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
876 break;
877 }
878 }
879
880 /* Restore the autopoll bit if necessary. */
881 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
882 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
883 DELAY(80);
884 }
885
886 if (i == BGE_TIMEOUT)
887 device_printf(sc->bge_dev,
888 "PHY write timed out (phy %d, reg %d, val %d)\n",
889 phy, reg, val);
890
891 return (0);
892}
893
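/*
 * Called by the MII layer when the PHY link state changes.  Record
 * whether the link is up and reprogram the MAC port mode, duplex and
 * flow-control bits to match the negotiated media.
 */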
894static void
895bge_miibus_statchg(device_t dev)
896{
897 struct bge_softc *sc;
898 struct mii_data *mii;
899 uint32_t mac_mode, rx_mode, tx_mode;
900
901 sc = device_get_softc(dev);
902 if ((sc->bge_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
903 return;
904 mii = device_get_softc(sc->bge_miibus);
905
906 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
907 (IFM_ACTIVE | IFM_AVALID)) {
908 switch (IFM_SUBTYPE(mii->mii_media_active)) {
909 case IFM_10_T:
910 case IFM_100_TX:
911 sc->bge_link = 1;
912 break;
913 case IFM_1000_T:
914 case IFM_1000_SX:
915 case IFM_2500_SX:
916 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
917 sc->bge_link = 1;
918 else
919 sc->bge_link = 0;
920 break;
921 default:
922 sc->bge_link = 0;
923 break;
924 }
925 } else
926 sc->bge_link = 0;
927 if (sc->bge_link == 0)
928 return;
929
930 /*
931 * APE firmware touches these registers to keep the MAC
932 * connected to the outside world. Try to keep the
933 * accesses atomic.
934 */
935
936 /* Set the port mode (MII/GMII) to match the link speed. */
937 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
938 ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
939 tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
940 rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
941
942 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
943 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
944 mac_mode |= BGE_PORTMODE_GMII;
945 else
946 mac_mode |= BGE_PORTMODE_MII;
947
948 /* Set MAC flow control behavior to match link flow control settings. */
949 tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
950 rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
 951	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
952 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
953 tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
954 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
955 rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
956 } else
957 mac_mode |= BGE_MACMODE_HALF_DUPLEX;
958
959 CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
960 DELAY(40);
961 CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
962 CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
963}
964
965/*
 966 * Initialize a standard receive ring descriptor.
967 */
968static int
969bge_newbuf_std(struct bge_softc *sc, int i)
970{
971 struct mbuf *m;
972 struct bge_rx_bd *r;
973 bus_dma_segment_t segs[1];
974 bus_dmamap_t map;
975 int error, nsegs;
976
977 if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
978 (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
979 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
980 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
981 if (m == NULL)
982 return (ENOBUFS);
983 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
984 } else {
985 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
986 if (m == NULL)
987 return (ENOBUFS);
988 m->m_len = m->m_pkthdr.len = MCLBYTES;
989 }
990 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
991 m_adj(m, ETHER_ALIGN);
992
993 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
994 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
995 if (error != 0) {
996 m_freem(m);
997 return (error);
998 }
999 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1000 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1001 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
1002 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1003 sc->bge_cdata.bge_rx_std_dmamap[i]);
1004 }
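	/*
	 * The new mbuf was loaded into the spare map above; swap it
	 * with the slot's map so the old map becomes the spare for
	 * the next replenish.
	 */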
1005 map = sc->bge_cdata.bge_rx_std_dmamap[i];
1006 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
1007 sc->bge_cdata.bge_rx_std_sparemap = map;
1008 sc->bge_cdata.bge_rx_std_chain[i] = m;
1009 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
1010 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
1011 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1012 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1013 r->bge_flags = BGE_RXBDFLAG_END;
1014 r->bge_len = segs[0].ds_len;
1015 r->bge_idx = i;
1016
1017 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1018 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1019
1020 return (0);
1021}
1022
1023/*
1024 * Initialize a jumbo receive ring descriptor. This allocates
1025 * a jumbo buffer from the pool managed internally by the driver.
1026 */
1027static int
1028bge_newbuf_jumbo(struct bge_softc *sc, int i)
1029{
1030 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1031 bus_dmamap_t map;
1032 struct bge_extrx_bd *r;
1033 struct mbuf *m;
1034 int error, nsegs;
1035
1036 MGETHDR(m, M_DONTWAIT, MT_DATA);
1037 if (m == NULL)
1038 return (ENOBUFS);
1039
1040 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1041 if (!(m->m_flags & M_EXT)) {
1042 m_freem(m);
1043 return (ENOBUFS);
1044 }
1045 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1046 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1047 m_adj(m, ETHER_ALIGN);
1048
1049 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1050 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1051 if (error != 0) {
1052 m_freem(m);
1053 return (error);
1054 }
1055
1056 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1057 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1058 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1059 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1060 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1061 }
1062 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1063 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1064 sc->bge_cdata.bge_rx_jumbo_sparemap;
1065 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1066 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1067 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1068 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1069 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1070 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1071
1072 /*
1073 * Fill in the extended RX buffer descriptor.
1074 */
1075 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1076 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1077 r->bge_idx = i;
1078 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
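	/*
	 * The cases below intentionally fall through so that a buffer
	 * mapped into fewer than four segments still fills every
	 * descriptor slot up to its segment count.
	 */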
1079 switch (nsegs) {
1080 case 4:
1081 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1082 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1083 r->bge_len3 = segs[3].ds_len;
1084 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1085 case 3:
1086 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1087 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1088 r->bge_len2 = segs[2].ds_len;
1089 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1090 case 2:
1091 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1092 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1093 r->bge_len1 = segs[1].ds_len;
1094 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1095 case 1:
1096 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1097 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1098 r->bge_len0 = segs[0].ds_len;
1099 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1100 break;
1101 default:
1102 panic("%s: %d segments\n", __func__, nsegs);
1103 }
1104
1105 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1106 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1107
1108 return (0);
1109}
1110
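/*
 * Populate the standard receive ring with fresh mbufs and advance the
 * producer index mailbox to the last initialized slot.
 */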
1111static int
1112bge_init_rx_ring_std(struct bge_softc *sc)
1113{
1114 int error, i;
1115
1116 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1117 sc->bge_std = 0;
1118 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1119 if ((error = bge_newbuf_std(sc, i)) != 0)
1120 return (error);
1121 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1122 }
1123
1124 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1125 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1126
1127 sc->bge_std = 0;
1128 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1129
1130 return (0);
1131}
1132
1133static void
1134bge_free_rx_ring_std(struct bge_softc *sc)
1135{
1136 int i;
1137
1138 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1139 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1140 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1141 sc->bge_cdata.bge_rx_std_dmamap[i],
1142 BUS_DMASYNC_POSTREAD);
1143 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1144 sc->bge_cdata.bge_rx_std_dmamap[i]);
1145 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1146 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1147 }
1148 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1149 sizeof(struct bge_rx_bd));
1150 }
1151}
1152
1153static int
1154bge_init_rx_ring_jumbo(struct bge_softc *sc)
1155{
1156 struct bge_rcb *rcb;
1157 int error, i;
1158
1159 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1160 sc->bge_jumbo = 0;
1161 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1162 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1163 return (error);
1164 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1165 }
1166
1167 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1168 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1169
1170 sc->bge_jumbo = 0;
1171
1172 /* Enable the jumbo receive producer ring. */
1173 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1174 rcb->bge_maxlen_flags =
1175 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1176 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1177
1178 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1179
1180 return (0);
1181}
1182
1183static void
1184bge_free_rx_ring_jumbo(struct bge_softc *sc)
1185{
1186 int i;
1187
1188 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1189 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1190 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1191 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1192 BUS_DMASYNC_POSTREAD);
1193 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1194 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1195 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1196 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1197 }
1198 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1199 sizeof(struct bge_extrx_bd));
1200 }
1201}
1202
1203static void
1204bge_free_tx_ring(struct bge_softc *sc)
1205{
1206 int i;
1207
1208 if (sc->bge_ldata.bge_tx_ring == NULL)
1209 return;
1210
1211 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1212 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1213 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1214 sc->bge_cdata.bge_tx_dmamap[i],
1215 BUS_DMASYNC_POSTWRITE);
1216 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1217 sc->bge_cdata.bge_tx_dmamap[i]);
1218 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1219 sc->bge_cdata.bge_tx_chain[i] = NULL;
1220 }
1221 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1222 sizeof(struct bge_tx_bd));
1223 }
1224}
1225
1226static int
1227bge_init_tx_ring(struct bge_softc *sc)
1228{
1229 sc->bge_txcnt = 0;
1230 sc->bge_tx_saved_considx = 0;
1231
1232 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1233 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1234 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1235
1236 /* Initialize transmit producer index for host-memory send ring. */
1237 sc->bge_tx_prodidx = 0;
1238 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1239
1240 /* 5700 b2 errata */
1241 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1242 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1243
1244 /* NIC-memory send ring not used; initialize to zero. */
1245 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1246 /* 5700 b2 errata */
1247 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1248 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1249
1250 return (0);
1251}
1252
1253static void
1254bge_setpromisc(struct bge_softc *sc)
1255{
1256 struct ifnet *ifp;
1257
1258 BGE_LOCK_ASSERT(sc);
1259
1260 ifp = sc->bge_ifp;
1261
1262 /* Enable or disable promiscuous mode as needed. */
1263 if (ifp->if_flags & IFF_PROMISC)
1264 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1265 else
1266 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1267}
1268
1269static void
1270bge_setmulti(struct bge_softc *sc)
1271{
1272 struct ifnet *ifp;
1273 struct ifmultiaddr *ifma;
1274 uint32_t hashes[4] = { 0, 0, 0, 0 };
1275 int h, i;
1276
1277 BGE_LOCK_ASSERT(sc);
1278
1279 ifp = sc->bge_ifp;
1280
1281 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1282 for (i = 0; i < 4; i++)
1283 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1284 return;
1285 }
1286
1287 /* First, zot all the existing filters. */
1288 for (i = 0; i < 4; i++)
1289 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1290
1291 /* Now program new ones. */
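	/*
	 * Each link-level address is hashed with little-endian CRC-32;
	 * the low 7 bits of the CRC select one of 128 filter bits
	 * spread across the four 32-bit BGE_MAR registers (bits 5-6
	 * pick the register, bits 0-4 the bit).  For example, a CRC
	 * whose low 7 bits are 0x6b sets bit 11 of hashes[3].
	 */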
1292 if_maddr_rlock(ifp);
1293 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1294 if (ifma->ifma_addr->sa_family != AF_LINK)
1295 continue;
1296 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1297 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1298 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1299 }
1300 if_maddr_runlock(ifp);
1301
1302 for (i = 0; i < 4; i++)
1303 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1304}
1305
1306static void
1307bge_setvlan(struct bge_softc *sc)
1308{
1309 struct ifnet *ifp;
1310
1311 BGE_LOCK_ASSERT(sc);
1312
1313 ifp = sc->bge_ifp;
1314
1315 /* Enable or disable VLAN tag stripping as needed. */
1316 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1317 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1318 else
1319 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1320}
1321
1322static void
1323bge_sig_pre_reset(struct bge_softc *sc, int type)
1324{
1325
1326 /*
 1327	 * Some chips don't like this, so only do it if ASF is enabled.
1328 */
1329 if (sc->bge_asf_mode)
1330 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1331
1332 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1333 switch (type) {
1334 case BGE_RESET_START:
1335 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1336 BGE_FW_DRV_STATE_START);
1337 break;
1338 case BGE_RESET_STOP:
1339 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1340 BGE_FW_DRV_STATE_UNLOAD);
1341 break;
1342 }
1343 }
1344}
1345
1346static void
1347bge_sig_post_reset(struct bge_softc *sc, int type)
1348{
1349
1350 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1351 switch (type) {
1352 case BGE_RESET_START:
1353 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1354 BGE_FW_DRV_STATE_START_DONE);
1355 /* START DONE */
1356 break;
1357 case BGE_RESET_STOP:
1358 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1359 BGE_FW_DRV_STATE_UNLOAD_DONE);
1360 break;
1361 }
1362 }
1363}
1364
1365static void
1366bge_sig_legacy(struct bge_softc *sc, int type)
1367{
1368
1369 if (sc->bge_asf_mode) {
1370 switch (type) {
1371 case BGE_RESET_START:
1372 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1373 BGE_FW_DRV_STATE_START);
1374 break;
1375 case BGE_RESET_STOP:
1376 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1377 BGE_FW_DRV_STATE_UNLOAD);
1378 break;
1379 }
1380 }
1381}
1382
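/*
 * Ask the ASF/IPMI firmware to pause its activity and wait briefly
 * (up to about 1ms) for it to consume the driver event before the
 * chip is stopped or reset.
 */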
1383static void
1384bge_stop_fw(struct bge_softc *sc)
1385{
1386 int i;
1387
1388 if (sc->bge_asf_mode) {
1389 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1390 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1391 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
1392
1393 for (i = 0; i < 100; i++ ) {
1394 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1395 BGE_RX_CPU_DRV_EVENT))
1396 break;
1397 DELAY(10);
1398 }
1399 }
1400}
1401
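/*
 * Compute the byte/word swap bits for the mode control register:
 * non-frame data is word swapped and frame data is byte and word
 * swapped, with non-frame byte swapping added on big-endian hosts.
 * The BCM5720 also needs the buffer-manager-to-host RX and
 * host-to-buffer-manager TX swap/enable bits.
 */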
1402static uint32_t
1403bge_dma_swap_options(struct bge_softc *sc)
1404{
1405 uint32_t dma_options;
1406
1407 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
1408 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
1409#if BYTE_ORDER == BIG_ENDIAN
1410 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
1411#endif
1412 if ((sc)->bge_asicrev == BGE_ASICREV_BCM5720)
1413 dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1414 BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
1415 BGE_MODECTL_HTX2B_ENABLE;
1416
1417 return (dma_options);
1418}
1419
1420/*
1421 * Do endian, PCI and DMA initialization.
1422 */
1423static int
1424bge_chipinit(struct bge_softc *sc)
1425{
1426 uint32_t dma_rw_ctl, misc_ctl, mode_ctl;
1427 uint16_t val;
1428 int i;
1429
1430 /* Set endianness before we access any non-PCI registers. */
1431 misc_ctl = BGE_INIT;
1432 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1433 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1434 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1435
1436 /* Clear the MAC control register */
1437 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1438 DELAY(40);
1439
1440 /*
1441 * Clear the MAC statistics block in the NIC's
1442 * internal memory.
1443 */
1444 for (i = BGE_STATS_BLOCK;
1445 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1446 BGE_MEMWIN_WRITE(sc, i, 0);
1447
1448 for (i = BGE_STATUS_BLOCK;
1449 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1450 BGE_MEMWIN_WRITE(sc, i, 0);
1451
1452 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1453 /*
1454 * Fix data corruption caused by non-qword write with WB.
1455 * Fix master abort in PCI mode.
1456 * Fix PCI latency timer.
1457 */
1458 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1459 val |= (1 << 10) | (1 << 12) | (1 << 13);
1460 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1461 }
1462
1463 /*
1464 * Set up the PCI DMA control register.
1465 */
1466 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1467 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1468 if (sc->bge_flags & BGE_FLAG_PCIE) {
299 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
300 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
301 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
302 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
303 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
304 { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
305 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
306 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
307 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
308 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
309 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
310 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
311 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
312 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
313 /* 5754 and 5787 share the same ASIC ID */
314 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
315 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
316 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
317 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
318 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
319 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
320 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
321 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
322 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
323
324 { 0, NULL }
325};
326
327/*
328 * Some defaults for major revisions, so that newer steppings
329 * that we don't know about have a shot at working.
330 */
331static const struct bge_revision const bge_majorrevs[] = {
332 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
333 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
334 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
335 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
336 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
337 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
338 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
339 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
340 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
341 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
342 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
343 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
344 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
345 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
346 /* 5754 and 5787 share the same ASIC ID */
347 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
348 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
349 { BGE_ASICREV_BCM57765, "unknown BCM57765" },
350 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
351 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
352 { BGE_ASICREV_BCM5719, "unknown BCM5719" },
353 { BGE_ASICREV_BCM5720, "unknown BCM5720" },
354
355 { 0, NULL }
356};
357
358#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
359#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
360#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
361#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
362#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
363#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
364#define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
365
366const struct bge_revision * bge_lookup_rev(uint32_t);
367const struct bge_vendor * bge_lookup_vendor(uint16_t);
368
369typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
370
371static int bge_probe(device_t);
372static int bge_attach(device_t);
373static int bge_detach(device_t);
374static int bge_suspend(device_t);
375static int bge_resume(device_t);
376static void bge_release_resources(struct bge_softc *);
377static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
378static int bge_dma_alloc(struct bge_softc *);
379static void bge_dma_free(struct bge_softc *);
380static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
381 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
382
383static void bge_devinfo(struct bge_softc *);
384static int bge_mbox_reorder(struct bge_softc *);
385
386static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
387static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
388static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
389static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
390static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
391
392static void bge_txeof(struct bge_softc *, uint16_t);
393static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
394static int bge_rxeof(struct bge_softc *, uint16_t, int);
395
396static void bge_asf_driver_up (struct bge_softc *);
397static void bge_tick(void *);
398static void bge_stats_clear_regs(struct bge_softc *);
399static void bge_stats_update(struct bge_softc *);
400static void bge_stats_update_regs(struct bge_softc *);
401static struct mbuf *bge_check_short_dma(struct mbuf *);
402static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
403 uint16_t *, uint16_t *);
404static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
405
406static void bge_intr(void *);
407static int bge_msi_intr(void *);
408static void bge_intr_task(void *, int);
409static void bge_start_locked(struct ifnet *);
410static void bge_start(struct ifnet *);
411static int bge_ioctl(struct ifnet *, u_long, caddr_t);
412static void bge_init_locked(struct bge_softc *);
413static void bge_init(void *);
414static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
415static void bge_stop(struct bge_softc *);
416static void bge_watchdog(struct bge_softc *);
417static int bge_shutdown(device_t);
418static int bge_ifmedia_upd_locked(struct ifnet *);
419static int bge_ifmedia_upd(struct ifnet *);
420static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
421
422static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
423static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
424
425static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
426static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
427
428static void bge_setpromisc(struct bge_softc *);
429static void bge_setmulti(struct bge_softc *);
430static void bge_setvlan(struct bge_softc *);
431
432static __inline void bge_rxreuse_std(struct bge_softc *, int);
433static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
434static int bge_newbuf_std(struct bge_softc *, int);
435static int bge_newbuf_jumbo(struct bge_softc *, int);
436static int bge_init_rx_ring_std(struct bge_softc *);
437static void bge_free_rx_ring_std(struct bge_softc *);
438static int bge_init_rx_ring_jumbo(struct bge_softc *);
439static void bge_free_rx_ring_jumbo(struct bge_softc *);
440static void bge_free_tx_ring(struct bge_softc *);
441static int bge_init_tx_ring(struct bge_softc *);
442
443static int bge_chipinit(struct bge_softc *);
444static int bge_blockinit(struct bge_softc *);
445static uint32_t bge_dma_swap_options(struct bge_softc *);
446
447static int bge_has_eaddr(struct bge_softc *);
448static uint32_t bge_readmem_ind(struct bge_softc *, int);
449static void bge_writemem_ind(struct bge_softc *, int, int);
450static void bge_writembx(struct bge_softc *, int, int);
451#ifdef notdef
452static uint32_t bge_readreg_ind(struct bge_softc *, int);
453#endif
454static void bge_writemem_direct(struct bge_softc *, int, int);
455static void bge_writereg_ind(struct bge_softc *, int, int);
456
457static int bge_miibus_readreg(device_t, int, int);
458static int bge_miibus_writereg(device_t, int, int, int);
459static void bge_miibus_statchg(device_t);
460#ifdef DEVICE_POLLING
461static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
462#endif
463
464#define BGE_RESET_START 1
465#define BGE_RESET_STOP 2
466static void bge_sig_post_reset(struct bge_softc *, int);
467static void bge_sig_legacy(struct bge_softc *, int);
468static void bge_sig_pre_reset(struct bge_softc *, int);
469static void bge_stop_fw(struct bge_softc *);
470static int bge_reset(struct bge_softc *);
471static void bge_link_upd(struct bge_softc *);
472
473/*
474 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
475 * leak information to untrusted users. It is also known to cause alignment
476 * traps on certain architectures.
477 */
478#ifdef BGE_REGISTER_DEBUG
479static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
480static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
481static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
482#endif
483static void bge_add_sysctls(struct bge_softc *);
484static void bge_add_sysctl_stats_regs(struct bge_softc *,
485 struct sysctl_ctx_list *, struct sysctl_oid_list *);
486static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
487 struct sysctl_oid_list *);
488static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
489
490static device_method_t bge_methods[] = {
491 /* Device interface */
492 DEVMETHOD(device_probe, bge_probe),
493 DEVMETHOD(device_attach, bge_attach),
494 DEVMETHOD(device_detach, bge_detach),
495 DEVMETHOD(device_shutdown, bge_shutdown),
496 DEVMETHOD(device_suspend, bge_suspend),
497 DEVMETHOD(device_resume, bge_resume),
498
499 /* MII interface */
500 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
501 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
502 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
503
504 DEVMETHOD_END
505};
506
507static driver_t bge_driver = {
508 "bge",
509 bge_methods,
510 sizeof(struct bge_softc)
511};
512
513static devclass_t bge_devclass;
514
515DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
516DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
517
518static int bge_allow_asf = 1;
519
520TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
521
522static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
523SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
524 "Allow ASF mode if available");
525
526#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
527#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
528#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
529#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
530#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
531
532static int
533bge_has_eaddr(struct bge_softc *sc)
534{
535#ifdef __sparc64__
536 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
537 device_t dev;
538 uint32_t subvendor;
539
540 dev = sc->bge_dev;
541
542 /*
543 * The on-board BGEs found in sun4u machines aren't fitted with
544 * an EEPROM which means that we have to obtain the MAC address
545 * via OFW and that some tests will always fail. We distinguish
546 * such BGEs by the subvendor ID, which also has to be obtained
547 * from OFW instead of the PCI configuration space as the latter
548 * indicates Broadcom as the subvendor of the netboot interface.
549 * For early Blade 1500 and 2500 we even have to check the OFW
550 * device path as the subvendor ID always defaults to Broadcom
551 * there.
552 */
553 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
554 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
555 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
556 return (0);
557 memset(buf, 0, sizeof(buf));
558 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
559 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
560 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
561 return (0);
562 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
563 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
564 return (0);
565 }
566#endif
567 return (1);
568}
569
570static uint32_t
571bge_readmem_ind(struct bge_softc *sc, int off)
572{
573 device_t dev;
574 uint32_t val;
575
576 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
577 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
578 return (0);
579
580 dev = sc->bge_dev;
581
582 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
583 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
584 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
585 return (val);
586}
587
588static void
589bge_writemem_ind(struct bge_softc *sc, int off, int val)
590{
591 device_t dev;
592
593 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
594 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
595 return;
596
597 dev = sc->bge_dev;
598
599 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
600 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
601 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
602}
603
604#ifdef notdef
605static uint32_t
606bge_readreg_ind(struct bge_softc *sc, int off)
607{
608 device_t dev;
609
610 dev = sc->bge_dev;
611
612 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
613 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
614}
615#endif
616
617static void
618bge_writereg_ind(struct bge_softc *sc, int off, int val)
619{
620 device_t dev;
621
622 dev = sc->bge_dev;
623
624 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
625 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
626}
627
628static void
629bge_writemem_direct(struct bge_softc *sc, int off, int val)
630{
631 CSR_WRITE_4(sc, off, val);
632}
633
634static void
635bge_writembx(struct bge_softc *sc, int off, int val)
636{
637 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
638 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
639
640 CSR_WRITE_4(sc, off, val);
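	/*
	 * On host bridges known to reorder posted writes (the
	 * BGE_FLAG_MBOX_REORDER quirk, see bge_mbox_reorder()), read the
	 * mailbox back so the write above is flushed before returning.
	 */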
641 if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0)
642 CSR_READ_4(sc, off);
643}
644
645/*
646 * Map a single buffer address.
647 */
648
649static void
650bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
651{
652 struct bge_dmamap_arg *ctx;
653
654 if (error)
655 return;
656
657 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
658
659 ctx = arg;
660 ctx->bge_busaddr = segs->ds_addr;
661}
662
663static uint8_t
664bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
665{
666 uint32_t access, byte = 0;
667 int i;
668
669 /* Lock. */
670 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
671 for (i = 0; i < 8000; i++) {
672 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
673 break;
674 DELAY(20);
675 }
676 if (i == 8000)
677 return (1);
678
679 /* Enable access. */
680 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
681 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
682
683 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
684 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
685 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
686 DELAY(10);
687 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
688 DELAY(10);
689 break;
690 }
691 }
692
693 if (i == BGE_TIMEOUT * 10) {
694 if_printf(sc->bge_ifp, "nvram read timed out\n");
695 return (1);
696 }
697
698 /* Get result. */
699 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
700
701 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
702
703 /* Disable access. */
704 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
705
706 /* Unlock. */
707 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
708 CSR_READ_4(sc, BGE_NVRAM_SWARB);
709
710 return (0);
711}
712
713/*
714 * Read a sequence of bytes from NVRAM.
715 */
716static int
717bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
718{
719 int err = 0, i;
720 uint8_t byte = 0;
721
722 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
723 return (1);
724
725 for (i = 0; i < cnt; i++) {
726 err = bge_nvram_getbyte(sc, off + i, &byte);
727 if (err)
728 break;
729 *(dest + i) = byte;
730 }
731
732 return (err ? 1 : 0);
733}
734
735/*
736 * Read a byte of data stored in the EEPROM at address 'addr.' The
737 * BCM570x supports both the traditional bitbang interface and an
738 * auto access interface for reading the EEPROM. We use the auto
739 * access method.
740 */
741static uint8_t
742bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
743{
744 int i;
745 uint32_t byte = 0;
746
747 /*
748 * Enable use of auto EEPROM access so we can avoid
749 * having to use the bitbang method.
750 */
751 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
752
753 /* Reset the EEPROM, load the clock period. */
754 CSR_WRITE_4(sc, BGE_EE_ADDR,
755 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
756 DELAY(20);
757
758 /* Issue the read EEPROM command. */
759 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
760
761 /* Wait for completion */
762 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
763 DELAY(10);
764 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
765 break;
766 }
767
768 if (i == BGE_TIMEOUT * 10) {
769 device_printf(sc->bge_dev, "EEPROM read timed out\n");
770 return (1);
771 }
772
773 /* Get result. */
774 byte = CSR_READ_4(sc, BGE_EE_DATA);
775
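	/*
	 * The data register holds the 32-bit word containing the requested
	 * byte; addr % 4 selects the byte lane.  For example, addr 0x7D
	 * (0x7D % 4 == 1) extracts the second byte via an 8-bit shift.
	 */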
776 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
777
778 return (0);
779}
780
781/*
782 * Read a sequence of bytes from the EEPROM.
783 */
784static int
785bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
786{
787 int i, error = 0;
788 uint8_t byte = 0;
789
790 for (i = 0; i < cnt; i++) {
791 error = bge_eeprom_getbyte(sc, off + i, &byte);
792 if (error)
793 break;
794 *(dest + i) = byte;
795 }
796
797 return (error ? 1 : 0);
798}
799
800static int
801bge_miibus_readreg(device_t dev, int phy, int reg)
802{
803 struct bge_softc *sc;
804 uint32_t val;
805 int i;
806
807 sc = device_get_softc(dev);
808
809 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
810 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
811 CSR_WRITE_4(sc, BGE_MI_MODE,
812 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
813 DELAY(80);
814 }
815
816 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
817 BGE_MIPHY(phy) | BGE_MIREG(reg));
818
819 /* Poll for the PHY register access to complete. */
820 for (i = 0; i < BGE_TIMEOUT; i++) {
821 DELAY(10);
822 val = CSR_READ_4(sc, BGE_MI_COMM);
823 if ((val & BGE_MICOMM_BUSY) == 0) {
824 DELAY(5);
825 val = CSR_READ_4(sc, BGE_MI_COMM);
826 break;
827 }
828 }
829
830 if (i == BGE_TIMEOUT) {
831 device_printf(sc->bge_dev,
832 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
833 phy, reg, val);
834 val = 0;
835 }
836
837 /* Restore the autopoll bit if necessary. */
838 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
839 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
840 DELAY(80);
841 }
842
843 if (val & BGE_MICOMM_READFAIL)
844 return (0);
845
846 return (val & 0xFFFF);
847}
848
849static int
850bge_miibus_writereg(device_t dev, int phy, int reg, int val)
851{
852 struct bge_softc *sc;
853 int i;
854
855 sc = device_get_softc(dev);
856
857 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
858 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
859 return (0);
860
861 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
862 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
863 CSR_WRITE_4(sc, BGE_MI_MODE,
864 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
865 DELAY(80);
866 }
867
868 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
869 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
870
871 for (i = 0; i < BGE_TIMEOUT; i++) {
872 DELAY(10);
873 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
874 DELAY(5);
875 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
876 break;
877 }
878 }
879
880 /* Restore the autopoll bit if necessary. */
881 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
882 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
883 DELAY(80);
884 }
885
886 if (i == BGE_TIMEOUT)
887 device_printf(sc->bge_dev,
888 "PHY write timed out (phy %d, reg %d, val %d)\n",
889 phy, reg, val);
890
891 return (0);
892}
893
894static void
895bge_miibus_statchg(device_t dev)
896{
897 struct bge_softc *sc;
898 struct mii_data *mii;
899 uint32_t mac_mode, rx_mode, tx_mode;
900
901 sc = device_get_softc(dev);
902 if ((sc->bge_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
903 return;
904 mii = device_get_softc(sc->bge_miibus);
905
906 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
907 (IFM_ACTIVE | IFM_AVALID)) {
908 switch (IFM_SUBTYPE(mii->mii_media_active)) {
909 case IFM_10_T:
910 case IFM_100_TX:
911 sc->bge_link = 1;
912 break;
913 case IFM_1000_T:
914 case IFM_1000_SX:
915 case IFM_2500_SX:
916 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
917 sc->bge_link = 1;
918 else
919 sc->bge_link = 0;
920 break;
921 default:
922 sc->bge_link = 0;
923 break;
924 }
925 } else
926 sc->bge_link = 0;
927 if (sc->bge_link == 0)
928 return;
929
930 /*
931 * APE firmware touches these registers to keep the MAC
932 * connected to the outside world. Try to keep the
933 * accesses atomic.
934 */
935
936 /* Set the port mode (MII/GMII) to match the link speed. */
937 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
938 ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
939 tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
940 rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
941
942 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
943 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
944 mac_mode |= BGE_PORTMODE_GMII;
945 else
946 mac_mode |= BGE_PORTMODE_MII;
947
948 /* Set MAC flow control behavior to match link flow control settings. */
949 tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
950 rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
951	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
952 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
953 tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
954 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
955 rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
956 } else
957 mac_mode |= BGE_MACMODE_HALF_DUPLEX;
958
959 CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
960 DELAY(40);
961 CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
962 CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
963}
964
965/*
966 * Initialize a standard receive ring descriptor.
967 */
968static int
969bge_newbuf_std(struct bge_softc *sc, int i)
970{
971 struct mbuf *m;
972 struct bge_rx_bd *r;
973 bus_dma_segment_t segs[1];
974 bus_dmamap_t map;
975 int error, nsegs;
976
977 if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
978 (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
979 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
980 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
981 if (m == NULL)
982 return (ENOBUFS);
983 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
984 } else {
985 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
986 if (m == NULL)
987 return (ENOBUFS);
988 m->m_len = m->m_pkthdr.len = MCLBYTES;
989 }
990 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
991 m_adj(m, ETHER_ALIGN);
992
993 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
994 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
995 if (error != 0) {
996 m_freem(m);
997 return (error);
998 }
999 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1000 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1001 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
1002 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1003 sc->bge_cdata.bge_rx_std_dmamap[i]);
1004 }
1005 map = sc->bge_cdata.bge_rx_std_dmamap[i];
1006 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
1007 sc->bge_cdata.bge_rx_std_sparemap = map;
1008 sc->bge_cdata.bge_rx_std_chain[i] = m;
1009 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
1010 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
1011 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1012 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1013 r->bge_flags = BGE_RXBDFLAG_END;
1014 r->bge_len = segs[0].ds_len;
1015 r->bge_idx = i;
1016
1017 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1018 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1019
1020 return (0);
1021}
1022
1023/*
1024 * Initialize a jumbo receive ring descriptor. This allocates
1025 * a 9KB mbuf cluster and maps it into an extended (multi-segment) RX BD.
1026 */
1027static int
1028bge_newbuf_jumbo(struct bge_softc *sc, int i)
1029{
1030 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1031 bus_dmamap_t map;
1032 struct bge_extrx_bd *r;
1033 struct mbuf *m;
1034 int error, nsegs;
1035
1036 MGETHDR(m, M_DONTWAIT, MT_DATA);
1037 if (m == NULL)
1038 return (ENOBUFS);
1039
1040 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1041 if (!(m->m_flags & M_EXT)) {
1042 m_freem(m);
1043 return (ENOBUFS);
1044 }
1045 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1046 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1047 m_adj(m, ETHER_ALIGN);
1048
1049 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1050 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1051 if (error != 0) {
1052 m_freem(m);
1053 return (error);
1054 }
1055
1056 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1057 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1058 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1059 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1060 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1061 }
1062 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1063 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1064 sc->bge_cdata.bge_rx_jumbo_sparemap;
1065 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1066 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1067 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1068 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1069 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1070 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1071
1072 /*
1073 * Fill in the extended RX buffer descriptor.
1074 */
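	/*
	 * The extended BD carries up to four address/length pairs
	 * (BGE_NSEG_JUMBO), so the 9KB cluster can be handed to the
	 * controller as a single jumbo buffer even when busdma maps it
	 * as several segments.  Slots beyond the segments actually
	 * mapped stay zeroed.
	 */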
1075 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1076 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1077 r->bge_idx = i;
1078 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1079 switch (nsegs) {
1080 case 4:
1081 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1082 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1083 r->bge_len3 = segs[3].ds_len;
1084 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1085 case 3:
1086 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1087 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1088 r->bge_len2 = segs[2].ds_len;
1089 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1090 case 2:
1091 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1092 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1093 r->bge_len1 = segs[1].ds_len;
1094 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1095 case 1:
1096 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1097 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1098 r->bge_len0 = segs[0].ds_len;
1099 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1100 break;
1101 default:
1102 panic("%s: %d segments\n", __func__, nsegs);
1103 }
1104
1105 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1106 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1107
1108 return (0);
1109}
1110
1111static int
1112bge_init_rx_ring_std(struct bge_softc *sc)
1113{
1114 int error, i;
1115
1116 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1117 sc->bge_std = 0;
1118 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1119 if ((error = bge_newbuf_std(sc, i)) != 0)
1120 return (error);
1121 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1122 }
1123
1124 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1125 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1126
1127 sc->bge_std = 0;
1128 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1129
1130 return (0);
1131}
1132
1133static void
1134bge_free_rx_ring_std(struct bge_softc *sc)
1135{
1136 int i;
1137
1138 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1139 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1140 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1141 sc->bge_cdata.bge_rx_std_dmamap[i],
1142 BUS_DMASYNC_POSTREAD);
1143 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1144 sc->bge_cdata.bge_rx_std_dmamap[i]);
1145 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1146 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1147 }
1148 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1149 sizeof(struct bge_rx_bd));
1150 }
1151}
1152
1153static int
1154bge_init_rx_ring_jumbo(struct bge_softc *sc)
1155{
1156 struct bge_rcb *rcb;
1157 int error, i;
1158
1159 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1160 sc->bge_jumbo = 0;
1161 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1162 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1163 return (error);
1164 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1165 }
1166
1167 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1168 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1169
1170 sc->bge_jumbo = 0;
1171
1172 /* Enable the jumbo receive producer ring. */
1173 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1174 rcb->bge_maxlen_flags =
1175 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1176 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1177
1178 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1179
1180 return (0);
1181}
1182
1183static void
1184bge_free_rx_ring_jumbo(struct bge_softc *sc)
1185{
1186 int i;
1187
1188 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1189 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1190 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1191 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1192 BUS_DMASYNC_POSTREAD);
1193 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1194 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1195 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1196 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1197 }
1198 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1199 sizeof(struct bge_extrx_bd));
1200 }
1201}
1202
1203static void
1204bge_free_tx_ring(struct bge_softc *sc)
1205{
1206 int i;
1207
1208 if (sc->bge_ldata.bge_tx_ring == NULL)
1209 return;
1210
1211 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1212 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1213 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1214 sc->bge_cdata.bge_tx_dmamap[i],
1215 BUS_DMASYNC_POSTWRITE);
1216 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1217 sc->bge_cdata.bge_tx_dmamap[i]);
1218 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1219 sc->bge_cdata.bge_tx_chain[i] = NULL;
1220 }
1221 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1222 sizeof(struct bge_tx_bd));
1223 }
1224}
1225
1226static int
1227bge_init_tx_ring(struct bge_softc *sc)
1228{
1229 sc->bge_txcnt = 0;
1230 sc->bge_tx_saved_considx = 0;
1231
1232 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1233 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1234 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1235
1236 /* Initialize transmit producer index for host-memory send ring. */
1237 sc->bge_tx_prodidx = 0;
1238 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1239
1240 /* 5700 b2 errata */
1241 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1242 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1243
1244 /* NIC-memory send ring not used; initialize to zero. */
1245 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1246 /* 5700 b2 errata */
1247 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1248 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1249
1250 return (0);
1251}
1252
1253static void
1254bge_setpromisc(struct bge_softc *sc)
1255{
1256 struct ifnet *ifp;
1257
1258 BGE_LOCK_ASSERT(sc);
1259
1260 ifp = sc->bge_ifp;
1261
1262 /* Enable or disable promiscuous mode as needed. */
1263 if (ifp->if_flags & IFF_PROMISC)
1264 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1265 else
1266 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1267}
1268
1269static void
1270bge_setmulti(struct bge_softc *sc)
1271{
1272 struct ifnet *ifp;
1273 struct ifmultiaddr *ifma;
1274 uint32_t hashes[4] = { 0, 0, 0, 0 };
1275 int h, i;
1276
1277 BGE_LOCK_ASSERT(sc);
1278
1279 ifp = sc->bge_ifp;
1280
1281 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1282 for (i = 0; i < 4; i++)
1283 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1284 return;
1285 }
1286
1287 /* First, zot all the existing filters. */
1288 for (i = 0; i < 4; i++)
1289 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1290
1291 /* Now program new ones. */
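	/*
	 * Hash scheme, with a worked example: ether_crc32_le() of the
	 * address is masked to 7 bits; bits 6-5 pick one of the four
	 * filter registers and bits 4-0 pick the bit within it.  So a
	 * hash of 0x53 sets bit 19 (0x13) of the third register
	 * (BGE_MAR0 + 8).
	 */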
1292 if_maddr_rlock(ifp);
1293 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1294 if (ifma->ifma_addr->sa_family != AF_LINK)
1295 continue;
1296 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1297 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1298 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1299 }
1300 if_maddr_runlock(ifp);
1301
1302 for (i = 0; i < 4; i++)
1303 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1304}
1305
1306static void
1307bge_setvlan(struct bge_softc *sc)
1308{
1309 struct ifnet *ifp;
1310
1311 BGE_LOCK_ASSERT(sc);
1312
1313 ifp = sc->bge_ifp;
1314
1315 /* Enable or disable VLAN tag stripping as needed. */
1316 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1317 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1318 else
1319 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1320}
1321
1322static void
1323bge_sig_pre_reset(struct bge_softc *sc, int type)
1324{
1325
1326 /*
1327 * Some chips don't like this, so only do it if ASF is enabled.
1328 */
1329 if (sc->bge_asf_mode)
1330 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1331
1332 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1333 switch (type) {
1334 case BGE_RESET_START:
1335 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1336 BGE_FW_DRV_STATE_START);
1337 break;
1338 case BGE_RESET_STOP:
1339 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1340 BGE_FW_DRV_STATE_UNLOAD);
1341 break;
1342 }
1343 }
1344}
1345
1346static void
1347bge_sig_post_reset(struct bge_softc *sc, int type)
1348{
1349
1350 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1351 switch (type) {
1352 case BGE_RESET_START:
1353 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1354 BGE_FW_DRV_STATE_START_DONE);
1355 /* START DONE */
1356 break;
1357 case BGE_RESET_STOP:
1358 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1359 BGE_FW_DRV_STATE_UNLOAD_DONE);
1360 break;
1361 }
1362 }
1363}
1364
1365static void
1366bge_sig_legacy(struct bge_softc *sc, int type)
1367{
1368
1369 if (sc->bge_asf_mode) {
1370 switch (type) {
1371 case BGE_RESET_START:
1372 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1373 BGE_FW_DRV_STATE_START);
1374 break;
1375 case BGE_RESET_STOP:
1376 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1377 BGE_FW_DRV_STATE_UNLOAD);
1378 break;
1379 }
1380 }
1381}
1382
1383static void
1384bge_stop_fw(struct bge_softc *sc)
1385{
1386 int i;
1387
1388 if (sc->bge_asf_mode) {
1389 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1390 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1391 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
1392
1393 for (i = 0; i < 100; i++ ) {
1394 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1395 BGE_RX_CPU_DRV_EVENT))
1396 break;
1397 DELAY(10);
1398 }
1399 }
1400}
1401
1402static uint32_t
1403bge_dma_swap_options(struct bge_softc *sc)
1404{
1405 uint32_t dma_options;
1406
1407 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
1408 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
1409#if BYTE_ORDER == BIG_ENDIAN
1410 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
1411#endif
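	/*
	 * Judging by the macro names, the BCM5720 also moves receive data
	 * through a separate buffer-manager-to-host path, so enable that
	 * path and apply the matching byte/word swapping to it.
	 */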
1412 if ((sc)->bge_asicrev == BGE_ASICREV_BCM5720)
1413 dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1414 BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
1415 BGE_MODECTL_HTX2B_ENABLE;
1416
1417 return (dma_options);
1418}
1419
1420/*
1421 * Do endian, PCI and DMA initialization.
1422 */
1423static int
1424bge_chipinit(struct bge_softc *sc)
1425{
1426 uint32_t dma_rw_ctl, misc_ctl, mode_ctl;
1427 uint16_t val;
1428 int i;
1429
1430 /* Set endianness before we access any non-PCI registers. */
1431 misc_ctl = BGE_INIT;
1432 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1433 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1434 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1435
1436 /* Clear the MAC control register */
1437 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1438 DELAY(40);
1439
1440 /*
1441 * Clear the MAC statistics block in the NIC's
1442 * internal memory.
1443 */
1444 for (i = BGE_STATS_BLOCK;
1445 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1446 BGE_MEMWIN_WRITE(sc, i, 0);
1447
1448 for (i = BGE_STATUS_BLOCK;
1449 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1450 BGE_MEMWIN_WRITE(sc, i, 0);
1451
1452 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1453 /*
1454 * Fix data corruption caused by non-qword write with WB.
1455 * Fix master abort in PCI mode.
1456 * Fix PCI latency timer.
1457 */
1458 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1459 val |= (1 << 10) | (1 << 12) | (1 << 13);
1460 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1461 }
1462
1463 /*
1464 * Set up the PCI DMA control register.
1465 */
1466 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1467 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1468 if (sc->bge_flags & BGE_FLAG_PCIE) {
1469		if (sc->bge_mps >= 256)
1470			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1471		else
1472			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1471 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1472 if (BGE_IS_5714_FAMILY(sc)) {
1473 /* 256 bytes for read and write. */
1474 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1475 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1476 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1477 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1478 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1479 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1480 /*
1481 * In the BCM5703, the DMA read watermark should
1482 * be set to less than or equal to the maximum
1483 * memory read byte count of the PCI-X command
1484 * register.
1485 */
1486 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1487 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1488 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1489 /* 1536 bytes for read, 384 bytes for write. */
1490 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1491 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1492 } else {
1493 /* 384 bytes for read and write. */
1494 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1495 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1496 0x0F;
1497 }
1498 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1499 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1500 uint32_t tmp;
1501
1502 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1503 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1504 if (tmp == 6 || tmp == 7)
1505 dma_rw_ctl |=
1506 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1507
1508 /* Set PCI-X DMA write workaround. */
1509 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1510 }
1511 } else {
1512 /* Conventional PCI bus: 256 bytes for read and write. */
1513 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1514 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1515
1516 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1517 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1518 dma_rw_ctl |= 0x0F;
1519 }
1520 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1521 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1522 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1523 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1524 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1525 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1526 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1527 if (BGE_IS_5717_PLUS(sc)) {
1528 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1529 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1530 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1531 /*
1532 * Enable HW workaround for controllers that misinterpret
1533 * a status tag update and leave interrupts permanently
1534 * disabled.
1535 */
1536 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1537 sc->bge_asicrev != BGE_ASICREV_BCM57765)
1538 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1539 }
1540 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1541
1542 /*
1543 * Set up general mode register.
1544 */
1545 mode_ctl = bge_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
1546 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
1547
1548 /*
1549 * BCM5701 B5 has a bug causing data corruption when using
1550 * 64-bit DMA reads, which can be terminated early and then
1551 * completed later as 32-bit accesses, in combination with
1552 * certain bridges.
1553 */
1554 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1555 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1556 mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1557
1558 /*
1559 * Tell the firmware the driver is running
1560 */
1561 if (sc->bge_asf_mode & ASF_STACKUP)
1562 mode_ctl |= BGE_MODECTL_STACKUP;
1563
1564 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1565
1566 /*
1567 * Disable memory write invalidate. Apparently it is not supported
1568 * properly by these devices. Also ensure that INTx isn't disabled,
1569 * as these chips need it even when using MSI.
1570 */
1571 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1572 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1573
1574	/* Set the timer prescaler (always 66 MHz) */
1575 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1576
1577 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1578 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1579 DELAY(40); /* XXX */
1580
1581 /* Put PHY into ready state */
1582 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1583 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1584 DELAY(40);
1585 }
1586
1587 return (0);
1588}
1589
1590static int
1591bge_blockinit(struct bge_softc *sc)
1592{
1593 struct bge_rcb *rcb;
1594 bus_size_t vrcb;
1595 bge_hostaddr taddr;
1596 uint32_t dmactl, val;
1597 int i, limit;
1598
1599 /*
1600 * Initialize the memory window pointer register so that
1601 * we can access the first 32K of internal NIC RAM. This will
1602 * allow us to set up the TX send ring RCBs and the RX return
1603 * ring RCBs, plus other things which live in NIC memory.
1604 */
1605 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1606
1607 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1608
1609 if (!(BGE_IS_5705_PLUS(sc))) {
1610 /* Configure mbuf memory pool */
1611 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1612 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1613 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1614 else
1615 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1616
1617 /* Configure DMA resource pool */
1618 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1619 BGE_DMA_DESCRIPTORS);
1620 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1621 }
1622
1623 /* Configure mbuf pool watermarks */
1624 if (BGE_IS_5717_PLUS(sc)) {
1625 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1626 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1627 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1628 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1629 } else {
1630 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1631 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1632 }
1633 } else if (!BGE_IS_5705_PLUS(sc)) {
1634 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1635 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1636 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1637 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1638 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1639 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1640 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1641 } else {
1642 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1643 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1644 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1645 }
1646
1647 /* Configure DMA resource watermarks */
1648 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1649 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1650
1651 /* Enable buffer manager */
1652 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1653 /*
1654 * Change the arbitration algorithm of TXMBUF read request to
1655 * round-robin instead of priority based for BCM5719. When
1656 * TXFIFO is almost empty, RDMA will hold its request until
1657 * TXFIFO is not almost empty.
1658 */
1659 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
1660 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1661 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1662
1663 /* Poll for buffer manager start indication */
1664 for (i = 0; i < BGE_TIMEOUT; i++) {
1665 DELAY(10);
1666 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1667 break;
1668 }
1669
1670 if (i == BGE_TIMEOUT) {
1671 device_printf(sc->bge_dev, "buffer manager failed to start\n");
1672 return (ENXIO);
1673 }
1674
1675 /* Enable flow-through queues */
1676 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1677 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1678
1679 /* Wait until queue initialization is complete */
1680 for (i = 0; i < BGE_TIMEOUT; i++) {
1681 DELAY(10);
1682 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1683 break;
1684 }
1685
1686 if (i == BGE_TIMEOUT) {
1687 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1688 return (ENXIO);
1689 }
1690
1691 /*
1692 * Summary of rings supported by the controller:
1693 *
1694 * Standard Receive Producer Ring
1695 * - This ring is used to feed receive buffers for "standard"
1696 * sized frames (typically 1536 bytes) to the controller.
1697 *
1698 * Jumbo Receive Producer Ring
1699 * - This ring is used to feed receive buffers for jumbo sized
1700 * frames (i.e. anything bigger than the "standard" frames)
1701 * to the controller.
1702 *
1703 * Mini Receive Producer Ring
1704 * - This ring is used to feed receive buffers for "mini"
1705 * sized frames to the controller.
1706 * - This feature required external memory for the controller
1707 * but was never used in a production system. Should always
1708 * be disabled.
1709 *
1710 * Receive Return Ring
1711 * - After the controller has placed an incoming frame into a
1712 * receive buffer, that buffer is moved into a receive return
1713 * ring. The driver is then responsible for passing the
1714 * buffer up to the stack. Many versions of the controller
1715 * support multiple RR rings.
1716 *
1717 * Send Ring
1718 * - This ring is used for outgoing frames. Many versions of
1719 * the controller support multiple send rings.
1720 */
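	/*
	 * The driver's side of all of these rings is index based: buffers
	 * are posted by advancing a producer index through the ring's
	 * mailbox register (bge_writembx() on the BGE_MBX_*_PROD_LO
	 * mailboxes below), while the controller reports its progress in
	 * the status block, which the completion paths (bge_rxeof() and
	 * bge_txeof()) use to walk the return and send rings.
	 */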
1721
1722 /* Initialize the standard receive producer ring control block. */
1723 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1724 rcb->bge_hostaddr.bge_addr_lo =
1725 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1726 rcb->bge_hostaddr.bge_addr_hi =
1727 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1728 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1729 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1730 if (BGE_IS_5717_PLUS(sc)) {
1731 /*
1732 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1733 * Bits 15-2 : Maximum RX frame size
1734 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1735 * Bit 0 : Reserved
1736 */
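		/*
		 * The macro packs these fields roughly as
		 * (ring size << 16) | low bits, so the statement below
		 * encodes a 512-entry ring in the upper halfword and the
		 * maximum frame size, shifted into bits 15-2, in the
		 * lower one.
		 */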
1737 rcb->bge_maxlen_flags =
1738 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1739 } else if (BGE_IS_5705_PLUS(sc)) {
1740 /*
1741 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1742 * Bits 15-2 : Reserved (should be 0)
1743 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1744 * Bit 0 : Reserved
1745 */
1746 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1747 } else {
1748 /*
1749 * Ring size is always XXX entries
1750 * Bits 31-16: Maximum RX frame size
1751 * Bits 15-2 : Reserved (should be 0)
1752 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1753 * Bit 0 : Reserved
1754 */
1755 rcb->bge_maxlen_flags =
1756 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1757 }
1758 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1759 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1760 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1761 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1762 else
1763 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1764 /* Write the standard receive producer ring control block. */
1765 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1766 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1767 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1768 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1769
1770 /* Reset the standard receive producer ring producer index. */
1771 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1772
1773 /*
1774 * Initialize the jumbo RX producer ring control
1775 * block. We set the 'ring disabled' bit in the
1776 * flags field until we're actually ready to start
1777 * using this ring (i.e. once we set the MTU
1778 * high enough to require it).
1779 */
1780 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1781 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1782 /* Get the jumbo receive producer ring RCB parameters. */
1783 rcb->bge_hostaddr.bge_addr_lo =
1784 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1785 rcb->bge_hostaddr.bge_addr_hi =
1786 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1787 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1788 sc->bge_cdata.bge_rx_jumbo_ring_map,
1789 BUS_DMASYNC_PREREAD);
1790 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1791 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1792 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1793 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1794 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1795 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1796 else
1797 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1798 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1799 rcb->bge_hostaddr.bge_addr_hi);
1800 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1801 rcb->bge_hostaddr.bge_addr_lo);
1802 /* Program the jumbo receive producer ring RCB parameters. */
1803 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1804 rcb->bge_maxlen_flags);
1805 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1806 /* Reset the jumbo receive producer ring producer index. */
1807 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1808 }
1809
1810 /* Disable the mini receive producer ring RCB. */
1811 if (BGE_IS_5700_FAMILY(sc)) {
1812 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1813 rcb->bge_maxlen_flags =
1814 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1815 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1816 rcb->bge_maxlen_flags);
1817 /* Reset the mini receive producer ring producer index. */
1818 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1819 }
1820
1821 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1822 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1823 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1824 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1825 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1826 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1827 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1828 }
1829 /*
1830 * The BD ring replenish thresholds control how often the
1831 * hardware fetches new BD's from the producer rings in host
1832 * memory. Setting the value too low on a busy system can
1833 * starve the hardware and reduce the throughput.
1834 *
1835 * Set the BD ring replenish thresholds. The recommended
1836 * values are 1/8th the number of descriptors allocated to
1837 * each ring.
1838 * XXX The 5754 requires a lower threshold, so it might be a
1839 * requirement of all 575x family chips. The Linux driver sets
1840 * the lower threshold for all 5705 family chips as well, but there
1841 * are reports that it might not need to be so strict.
1842 *
1843 * XXX Linux does some extra fiddling here for the 5906 parts as
1844 * well.
1845 */
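	/*
	 * With the usual 512-entry standard ring this works out to a
	 * threshold of 64 BDs on the older chips; 5705 and newer parts
	 * simply use a fixed value of 8.
	 */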
1846 if (BGE_IS_5705_PLUS(sc))
1847 val = 8;
1848 else
1849 val = BGE_STD_RX_RING_CNT / 8;
1850 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1851 if (BGE_IS_JUMBO_CAPABLE(sc))
1852 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1853 BGE_JUMBO_RX_RING_CNT/8);
1854 if (BGE_IS_5717_PLUS(sc)) {
1855 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1856 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1857 }
1858
1859 /*
1860 * Disable all send rings by setting the 'ring disabled' bit
1861 * in the flags field of all the TX send ring control blocks,
1862 * located in NIC memory.
1863 */
1864 if (!BGE_IS_5705_PLUS(sc))
1865 /* 5700 to 5704 had 16 send rings. */
1866 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1867 else
1868 limit = 1;
1869 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1870 for (i = 0; i < limit; i++) {
1871 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1872 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1873 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1874 vrcb += sizeof(struct bge_rcb);
1875 }
1876
1877 /* Configure send ring RCB 0 (we use only the first ring) */
1878 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1879 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1880 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1881 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1882 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1883 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1884 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1885 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1886 else
1887 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1888 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1889 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1890 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1891
1892 /*
1893 * Disable all receive return rings by setting the
1894 * 'ring disabled' bit in the flags field of all the receive
1895 * return ring control blocks, located in NIC memory.
1896 */
1897 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1898 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1899 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
1900 /* Should be 17, use 16 until we get an SRAM map. */
1901 limit = 16;
1902 } else if (!BGE_IS_5705_PLUS(sc))
1903 limit = BGE_RX_RINGS_MAX;
1904 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1905 sc->bge_asicrev == BGE_ASICREV_BCM57765)
1906 limit = 4;
1907 else
1908 limit = 1;
1909 /* Disable all receive return rings. */
1910 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1911 for (i = 0; i < limit; i++) {
1912 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1913 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1914 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1915 BGE_RCB_FLAG_RING_DISABLED);
1916 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1917 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1918 (i * (sizeof(uint64_t))), 0);
1919 vrcb += sizeof(struct bge_rcb);
1920 }
1921
1922 /*
1923 * Set up receive return ring 0. Note that the NIC address
1924 * for RX return rings is 0x0. The return rings live entirely
1925 * within the host, so the nicaddr field in the RCB isn't used.
1926 */
1927 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1928 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1929 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1930 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1931 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1932 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1933 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1934
1935 /* Set random backoff seed for TX */
1936 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1937 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1938 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1939 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1940 BGE_TX_BACKOFF_SEED_MASK);
1941
1942 /* Set inter-packet gap */
1943 val = 0x2620;
1944 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
1945 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
1946 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
1947 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1948
1949 /*
1950 * Specify which ring to use for packets that don't match
1951 * any RX rules.
1952 */
1953 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1954
1955 /*
1956 * Configure number of RX lists. One interrupt distribution
1957 * list, sixteen active lists, one bad frames class.
1958 */
1959 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1960
1961 /* Initialize RX list placement stats mask. */
1962 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1963 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1964
1965 /* Disable host coalescing until we get it set up */
1966 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1967
1968 /* Poll to make sure it's shut down. */
1969 for (i = 0; i < BGE_TIMEOUT; i++) {
1970 DELAY(10);
1971 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1972 break;
1973 }
1974
1975 if (i == BGE_TIMEOUT) {
1976 device_printf(sc->bge_dev,
1977 "host coalescing engine failed to idle\n");
1978 return (ENXIO);
1979 }
1980
1981 /* Set up host coalescing defaults */
1982 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1983 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1984 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1985 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1986 if (!(BGE_IS_5705_PLUS(sc))) {
1987 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1988 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1989 }
1990 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1991 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1992
1993 /* Set up address of statistics block */
1994 if (!(BGE_IS_5705_PLUS(sc))) {
1995 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1996 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1997 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1998 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1999 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2000 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2001 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2002 }
2003
2004 /* Set up address of status block */
2005 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
2006 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
2007 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
2008 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
2009
2010 /* Set up status block size. */
2011 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2012 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
2013 val = BGE_STATBLKSZ_FULL;
2014 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2015 } else {
2016 val = BGE_STATBLKSZ_32BYTE;
2017 bzero(sc->bge_ldata.bge_status_block, 32);
2018 }
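	/*
	 * With a single RX return ring configured, only the first 32 bytes
	 * of the status block are of interest, so the smaller size is
	 * sufficient here; BCM5700 AX/BX wants the full-sized block
	 * regardless (see the matching allocation in bge_dma_alloc()).
	 */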
2019 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2020 sc->bge_cdata.bge_status_map,
2021 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2022
2023 /* Turn on host coalescing state machine */
2024 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2025
2026 /* Turn on RX BD completion state machine and enable attentions */
2027 CSR_WRITE_4(sc, BGE_RBDC_MODE,
2028 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
2029
2030 /* Turn on RX list placement state machine */
2031 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2032
2033 /* Turn on RX list selector state machine. */
2034 if (!(BGE_IS_5705_PLUS(sc)))
2035 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2036
2037 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2038 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2039 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2040 BGE_MACMODE_FRMHDR_DMA_ENB;
2041
2042 if (sc->bge_flags & BGE_FLAG_TBI)
2043 val |= BGE_PORTMODE_TBI;
2044 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
2045 val |= BGE_PORTMODE_GMII;
2046 else
2047 val |= BGE_PORTMODE_MII;
2048
2049 /* Turn on DMA, clear stats */
2050 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2051 DELAY(40);
2052
2053 /* Set misc. local control, enable interrupts on attentions */
2054 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2055
2056#ifdef notdef
2057 /* Assert GPIO pins for PHY reset */
2058 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2059 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2060 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2061 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2062#endif
2063
2064 /* Turn on DMA completion state machine */
2065 if (!(BGE_IS_5705_PLUS(sc)))
2066 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2067
2068 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2069
2070 /* Enable host coalescing bug fix. */
2071 if (BGE_IS_5755_PLUS(sc))
2072 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2073
2074 /* Request larger DMA burst size to get better performance. */
2075 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2076 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2077
2078 /* Turn on write DMA state machine */
2079 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2080 DELAY(40);
2081
2082 /* Turn on read DMA state machine */
2083 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2084
2085 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2086 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2087
2088 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2089 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2090 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2091 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2092 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2093 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2094 if (sc->bge_flags & BGE_FLAG_PCIE)
2095 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2096 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2097 val |= BGE_RDMAMODE_TSO4_ENABLE;
2098 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2099 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2100 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2101 val |= BGE_RDMAMODE_TSO6_ENABLE;
2102 }
2103
2104 if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2105 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2106 BGE_RDMAMODE_H2BNC_VLAN_DET;
2107 /*
2108 * Allow multiple outstanding read requests from
2109 * non-LSO read DMA engine.
2110 */
2111 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2112 }
2113
2114 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2115 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2116 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2117 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2118 BGE_IS_5717_PLUS(sc)) {
2119 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
2120 /*
2121 * Adjust tx margin to prevent TX data corruption and
2122 * fix internal FIFO overflow.
2123 */
2124 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2125 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2126 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2127 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2128 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2129 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2130 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2131 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2132 }
2133 /*
2134 * Enable fix for read DMA FIFO overruns.
2135 * The fix is to limit the number of RX BDs
2136 * the hardware would fetch at a time.
2137 */
2138 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
2139 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2140 }
2141
2142 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2143 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2144 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2145 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2146 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2147 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2148 /*
2149 * Allow 4KB burst length reads for non-LSO frames.
2150 * Enable 512B burst length reads for buffer descriptors.
2151 */
2152 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2153 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2154 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2155 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2156 }
2157
2158 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2159 DELAY(40);
2160
2161 /* Turn on RX data completion state machine */
2162 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2163
2164 /* Turn on RX BD initiator state machine */
2165 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2166
2167 /* Turn on RX data and RX BD initiator state machine */
2168 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2169
2170 /* Turn on Mbuf cluster free state machine */
2171 if (!(BGE_IS_5705_PLUS(sc)))
2172 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2173
2174 /* Turn on send BD completion state machine */
2175 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2176
2177 /* Turn on send data completion state machine */
2178 val = BGE_SDCMODE_ENABLE;
2179 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2180 val |= BGE_SDCMODE_CDELAY;
2181 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2182
2183 /* Turn on send data initiator state machine */
2184 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2185 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2186 BGE_SDIMODE_HW_LSO_PRE_DMA);
2187 else
2188 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2189
2190 /* Turn on send BD initiator state machine */
2191 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2192
2193 /* Turn on send BD selector state machine */
2194 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2195
2196 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2197 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2198 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2199
2200 /* ack/clear link change events */
2201 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2202 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2203 BGE_MACSTAT_LINK_CHANGED);
2204 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2205
2206 /*
2207 * Enable attention when the link has changed state for
2208 * devices that use auto polling.
2209 */
2210 if (sc->bge_flags & BGE_FLAG_TBI) {
2211 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2212 } else {
2213 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2214 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2215 DELAY(80);
2216 }
2217 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2218 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2219 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2220 BGE_EVTENB_MI_INTERRUPT);
2221 }
2222
2223 /*
2224 * Clear any pending link state attention.
2225 * Otherwise some link state change events may be lost until attention
2226 * is cleared by the bge_intr() -> bge_link_upd() sequence.
2227 * It's not necessary on newer BCM chips - perhaps enabling link
2228 * state change attentions implies clearing pending attention.
2229 */
2230 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2231 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2232 BGE_MACSTAT_LINK_CHANGED);
2233
2234 /* Enable link state change attentions. */
2235 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2236
2237 return (0);
2238}
2239
2240const struct bge_revision *
2241bge_lookup_rev(uint32_t chipid)
2242{
2243 const struct bge_revision *br;
2244
2245 for (br = bge_revisions; br->br_name != NULL; br++) {
2246 if (br->br_chipid == chipid)
2247 return (br);
2248 }
2249
2250 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2251 if (br->br_chipid == BGE_ASICREV(chipid))
2252 return (br);
2253 }
2254
2255 return (NULL);
2256}
2257
2258const struct bge_vendor *
2259bge_lookup_vendor(uint16_t vid)
2260{
2261 const struct bge_vendor *v;
2262
2263 for (v = bge_vendors; v->v_name != NULL; v++)
2264 if (v->v_id == vid)
2265 return (v);
2266
2267 panic("%s: unknown vendor %d", __func__, vid);
2268 return (NULL);
2269}
2270
2271/*
2272 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2273 * against our list and return its name if we find a match.
2274 *
2275 * Note that since the Broadcom controller contains VPD support, we
2276 * try to get the device name string from the controller itself instead
2277 * of the compiled-in string. This guarantees we'll always announce the
2278 * right product name. We fall back to the compiled-in string when
2279 * VPD is unavailable or corrupt.
2280 */
2281static int
2282bge_probe(device_t dev)
2283{
2284 char buf[96];
2285 char model[64];
2286 const struct bge_revision *br;
2287 const char *pname;
2288 struct bge_softc *sc = device_get_softc(dev);
2289 const struct bge_type *t = bge_devs;
2290 const struct bge_vendor *v;
2291 uint32_t id;
2292 uint16_t did, vid;
2293
2294 sc->bge_dev = dev;
2295 vid = pci_get_vendor(dev);
2296 did = pci_get_device(dev);
2297 while(t->bge_vid != 0) {
2298 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2299 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2300 BGE_PCIMISCCTL_ASICREV_SHIFT;
2301 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2302 /*
2303 * Find the ASIC revision. Different chips
2304 * use different registers.
2305 */
2306 switch (pci_get_device(dev)) {
2307 case BCOM_DEVICEID_BCM5717:
2308 case BCOM_DEVICEID_BCM5718:
2309 case BCOM_DEVICEID_BCM5719:
2310 case BCOM_DEVICEID_BCM5720:
2311 id = pci_read_config(dev,
2312 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2313 break;
2314 case BCOM_DEVICEID_BCM57761:
2315 case BCOM_DEVICEID_BCM57765:
2316 case BCOM_DEVICEID_BCM57781:
2317 case BCOM_DEVICEID_BCM57785:
2318 case BCOM_DEVICEID_BCM57791:
2319 case BCOM_DEVICEID_BCM57795:
2320 id = pci_read_config(dev,
2321 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2322 break;
2323 default:
2324 id = pci_read_config(dev,
2325 BGE_PCI_PRODID_ASICREV, 4);
2326 }
2327 }
2328 br = bge_lookup_rev(id);
2329 v = bge_lookup_vendor(vid);
2330 if (bge_has_eaddr(sc) &&
2331 pci_get_vpd_ident(dev, &pname) == 0)
2332 snprintf(model, 64, "%s", pname);
2333 else
2334 snprintf(model, 64, "%s %s", v->v_name,
2335 br != NULL ? br->br_name :
2336 "NetXtreme Ethernet Controller");
2337 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2338 br != NULL ? "" : "unknown ", id);
2339 device_set_desc_copy(dev, buf);
2340 return (0);
2341 }
2342 t++;
2343 }
2344
2345 return (ENXIO);
2346}
2347
2348static void
2349bge_dma_free(struct bge_softc *sc)
2350{
2351 int i;
2352
2353 /* Destroy DMA maps for RX buffers. */
2354 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2355 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2356 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2357 sc->bge_cdata.bge_rx_std_dmamap[i]);
2358 }
2359 if (sc->bge_cdata.bge_rx_std_sparemap)
2360 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2361 sc->bge_cdata.bge_rx_std_sparemap);
2362
2363 /* Destroy DMA maps for jumbo RX buffers. */
2364 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2365 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2366 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2367 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2368 }
2369 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2370 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2371 sc->bge_cdata.bge_rx_jumbo_sparemap);
2372
2373 /* Destroy DMA maps for TX buffers. */
2374 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2375 if (sc->bge_cdata.bge_tx_dmamap[i])
2376 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2377 sc->bge_cdata.bge_tx_dmamap[i]);
2378 }
2379
2380 if (sc->bge_cdata.bge_rx_mtag)
2381 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2382 if (sc->bge_cdata.bge_mtag_jumbo)
2383 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo);
2384 if (sc->bge_cdata.bge_tx_mtag)
2385 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2386
2387 /* Destroy standard RX ring. */
2388 if (sc->bge_cdata.bge_rx_std_ring_map)
2389 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2390 sc->bge_cdata.bge_rx_std_ring_map);
2391 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2392 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2393 sc->bge_ldata.bge_rx_std_ring,
2394 sc->bge_cdata.bge_rx_std_ring_map);
2395
2396 if (sc->bge_cdata.bge_rx_std_ring_tag)
2397 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2398
2399 /* Destroy jumbo RX ring. */
2400 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2401 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2402 sc->bge_cdata.bge_rx_jumbo_ring_map);
2403
2404 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2405 sc->bge_ldata.bge_rx_jumbo_ring)
2406 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2407 sc->bge_ldata.bge_rx_jumbo_ring,
2408 sc->bge_cdata.bge_rx_jumbo_ring_map);
2409
2410 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2411 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2412
2413 /* Destroy RX return ring. */
2414 if (sc->bge_cdata.bge_rx_return_ring_map)
2415 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2416 sc->bge_cdata.bge_rx_return_ring_map);
2417
2418 if (sc->bge_cdata.bge_rx_return_ring_map &&
2419 sc->bge_ldata.bge_rx_return_ring)
2420 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2421 sc->bge_ldata.bge_rx_return_ring,
2422 sc->bge_cdata.bge_rx_return_ring_map);
2423
2424 if (sc->bge_cdata.bge_rx_return_ring_tag)
2425 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2426
2427 /* Destroy TX ring. */
2428 if (sc->bge_cdata.bge_tx_ring_map)
2429 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2430 sc->bge_cdata.bge_tx_ring_map);
2431
2432 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2433 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2434 sc->bge_ldata.bge_tx_ring,
2435 sc->bge_cdata.bge_tx_ring_map);
2436
2437 if (sc->bge_cdata.bge_tx_ring_tag)
2438 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2439
2440 /* Destroy status block. */
2441 if (sc->bge_cdata.bge_status_map)
2442 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2443 sc->bge_cdata.bge_status_map);
2444
2445 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2446 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2447 sc->bge_ldata.bge_status_block,
2448 sc->bge_cdata.bge_status_map);
2449
2450 if (sc->bge_cdata.bge_status_tag)
2451 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2452
2453 /* Destroy statistics block. */
2454 if (sc->bge_cdata.bge_stats_map)
2455 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2456 sc->bge_cdata.bge_stats_map);
2457
2458 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2459 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2460 sc->bge_ldata.bge_stats,
2461 sc->bge_cdata.bge_stats_map);
2462
2463 if (sc->bge_cdata.bge_stats_tag)
2464 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2465
2466 if (sc->bge_cdata.bge_buffer_tag)
2467 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2468
2469 /* Destroy the parent tag. */
2470 if (sc->bge_cdata.bge_parent_tag)
2471 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2472}
2473
2474static int
2475bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2476 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2477 bus_addr_t *paddr, const char *msg)
2478{
2479 struct bge_dmamap_arg ctx;
2480 int error;
2481
2482 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2483 alignment, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2484 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2485 if (error != 0) {
2486 device_printf(sc->bge_dev,
2487 "could not create %s dma tag\n", msg);
2488 return (ENOMEM);
2489 }
2490 /* Allocate DMA'able memory for ring. */
2491 error = bus_dmamem_alloc(*tag, (void **)ring,
2492 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2493 if (error != 0) {
2494 device_printf(sc->bge_dev,
2495 "could not allocate DMA'able memory for %s\n", msg);
2496 return (ENOMEM);
2497 }
2498 /* Load the address of the ring. */
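	/*
	 * bge_dma_map_addr() is the load callback; it records the bus
	 * address of the single segment into ctx.bge_busaddr, which is
	 * read back below once the load has completed.
	 */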
2499 ctx.bge_busaddr = 0;
2500 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2501 &ctx, BUS_DMA_NOWAIT);
2502 if (error != 0) {
2503 device_printf(sc->bge_dev,
2504 "could not load DMA'able memory for %s\n", msg);
2505 return (ENOMEM);
2506 }
2507 *paddr = ctx.bge_busaddr;
2508 return (0);
2509}
2510
2511static int
2512bge_dma_alloc(struct bge_softc *sc)
2513{
2514 bus_addr_t lowaddr;
2515 bus_size_t rxmaxsegsz, sbsz, txsegsz, txmaxsegsz;
2516 int i, error;
2517
2518 lowaddr = BUS_SPACE_MAXADDR;
2519 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2520 lowaddr = BGE_DMA_MAXADDR;
2521 /*
2522 * Allocate the parent bus DMA tag appropriate for PCI.
2523 */
2524 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2525 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2526 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2527 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2528 if (error != 0) {
2529 device_printf(sc->bge_dev,
2530 "could not allocate parent dma tag\n");
2531 return (ENOMEM);
2532 }
2533
2534 /* Create tag for standard RX ring. */
2535 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2536 &sc->bge_cdata.bge_rx_std_ring_tag,
2537 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2538 &sc->bge_cdata.bge_rx_std_ring_map,
2539 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2540 if (error)
2541 return (error);
2542
2543 /* Create tag for RX return ring. */
2544 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2545 &sc->bge_cdata.bge_rx_return_ring_tag,
2546 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2547 &sc->bge_cdata.bge_rx_return_ring_map,
2548 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2549 if (error)
2550 return (error);
2551
2552 /* Create tag for TX ring. */
2553 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2554 &sc->bge_cdata.bge_tx_ring_tag,
2555 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2556 &sc->bge_cdata.bge_tx_ring_map,
2557 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2558 if (error)
2559 return (error);
2560
2561 /*
2562 * Create tag for status block.
2563 * Because we use only a single TX/RX/RX return ring, use the
2564 * minimum status block size, except on BCM5700 AX/BX, which
2565 * seems to want to see the full status block size regardless
2566 * of the configured number of rings.
2567 */
2568 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2569 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2570 sbsz = BGE_STATUS_BLK_SZ;
2571 else
2572 sbsz = 32;
2573 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2574 &sc->bge_cdata.bge_status_tag,
2575 (uint8_t **)&sc->bge_ldata.bge_status_block,
2576 &sc->bge_cdata.bge_status_map,
2577 &sc->bge_ldata.bge_status_block_paddr, "status block");
2578 if (error)
2579 return (error);
2580
2581 /* Create tag for statistics block. */
2582 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2583 &sc->bge_cdata.bge_stats_tag,
2584 (uint8_t **)&sc->bge_ldata.bge_stats,
2585 &sc->bge_cdata.bge_stats_map,
2586 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2587 if (error)
2588 return (error);
2589
2590 /* Create tag for jumbo RX ring. */
2591 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2592 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2593 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2594 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2595 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2596 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2597 if (error)
2598 return (error);
2599 }
2600
2601 /* Create parent tag for buffers. */
2602 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2603 /*
2604 * XXX
2605 * A watchdog timeout issue was observed on BCM5704 devices that
2606 * live behind a PCI-X bridge (e.g. the AMD 8131 PCI-X bridge).
2607 * Both limiting the DMA address space to 32 bits and flushing
2608 * mailbox writes seem to address the issue.
2609 */
2610 if (sc->bge_pcixcap != 0)
2611 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2612 }
2613 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 1, 0, lowaddr,
2614 BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
2615 BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
2616 &sc->bge_cdata.bge_buffer_tag);
2617 if (error != 0) {
2618 device_printf(sc->bge_dev,
2619 "could not allocate buffer dma tag\n");
2620 return (ENOMEM);
2621 }
2622 /* Create tag for Tx mbufs. */
2623 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2624 txsegsz = BGE_TSOSEG_SZ;
2625 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2626 } else {
2627 txsegsz = MCLBYTES;
2628 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2629 }
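	/*
	 * The TSO case sizes the tag for a maximal 64KB IP datagram plus an
	 * Ethernet/VLAN header, with per-segment size BGE_TSOSEG_SZ; the
	 * non-TSO case keeps segments at mbuf cluster size.
	 */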
2630 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2631 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2632 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2633 &sc->bge_cdata.bge_tx_mtag);
2634
2635 if (error) {
2636 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2637 return (ENOMEM);
2638 }
2639
2640 /* Create tag for Rx mbufs. */
2641 if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
2642 rxmaxsegsz = MJUM9BYTES;
2643 else
2644 rxmaxsegsz = MCLBYTES;
2645 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2646 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
2647 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2648
2649 if (error) {
2650 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2651 return (ENOMEM);
2652 }
2653
2654 /* Create DMA maps for RX buffers. */
2655 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2656 &sc->bge_cdata.bge_rx_std_sparemap);
2657 if (error) {
2658 device_printf(sc->bge_dev,
2659 "can't create spare DMA map for RX\n");
2660 return (ENOMEM);
2661 }
2662 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2663 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2664 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2665 if (error) {
2666 device_printf(sc->bge_dev,
2667 "can't create DMA map for RX\n");
2668 return (ENOMEM);
2669 }
2670 }
2671
2672 /* Create DMA maps for TX buffers. */
2673 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2674 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2675 &sc->bge_cdata.bge_tx_dmamap[i]);
2676 if (error) {
2677 device_printf(sc->bge_dev,
2678 "can't create DMA map for TX\n");
2679 return (ENOMEM);
2680 }
2681 }
2682
2683 /* Create tags for jumbo RX buffers. */
2684 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2685 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2686 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2687 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2688 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2689 if (error) {
2690 device_printf(sc->bge_dev,
2691 "could not allocate jumbo dma tag\n");
2692 return (ENOMEM);
2693 }
2694 /* Create DMA maps for jumbo RX buffers. */
2695 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2696 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2697 if (error) {
2698 device_printf(sc->bge_dev,
2699 "can't create spare DMA map for jumbo RX\n");
2700 return (ENOMEM);
2701 }
2702 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2703 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2704 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2705 if (error) {
2706 device_printf(sc->bge_dev,
2707 "can't create DMA map for jumbo RX\n");
2708 return (ENOMEM);
2709 }
2710 }
2711 }
2712
2713 return (0);
2714}
2715
2716/*
2717 * Return true if this device has more than one port.
2718 */
2719static int
2720bge_has_multiple_ports(struct bge_softc *sc)
2721{
2722 device_t dev = sc->bge_dev;
2723 u_int b, d, f, fscan, s;
2724
2725 d = pci_get_domain(dev);
2726 b = pci_get_bus(dev);
2727 s = pci_get_slot(dev);
2728 f = pci_get_function(dev);
2729 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2730 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2731 return (1);
2732 return (0);
2733}
2734
2735/*
2736 * Return true if MSI can be used with this device.
2737 */
2738static int
2739bge_can_use_msi(struct bge_softc *sc)
2740{
2741 int can_use_msi = 0;
2742
2743 if (sc->bge_msi == 0)
2744 return (0);
2745
2746 /* Disable MSI for polling(4). */
2747#ifdef DEVICE_POLLING
2748 return (0);
2749#endif
2750 switch (sc->bge_asicrev) {
2751 case BGE_ASICREV_BCM5714_A0:
2752 case BGE_ASICREV_BCM5714:
2753 /*
2754 * Apparently, MSI doesn't work when these chips are
2755 * configured in single-port mode.
2756 */
2757 if (bge_has_multiple_ports(sc))
2758 can_use_msi = 1;
2759 break;
2760 case BGE_ASICREV_BCM5750:
2761 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2762 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2763 can_use_msi = 1;
2764 break;
2765 default:
2766 if (BGE_IS_575X_PLUS(sc))
2767 can_use_msi = 1;
2768 }
2769 return (can_use_msi);
2770}
2771
2772static int
2773bge_mbox_reorder(struct bge_softc *sc)
2774{
2775 /* List of PCI bridges that are known to reorder mailbox writes. */
2776 static const struct mbox_reorder {
2777 const uint16_t vendor;
2778 const uint16_t device;
2779 const char *desc;
2780 } const mbox_reorder_lists[] = {
2781 { 0x1022, 0x7450, "AMD-8131 PCI-X Bridge" },
2782 };
2783 devclass_t pci, pcib;
2784 device_t bus, dev;
2785 int i;
2786
2787 pci = devclass_find("pci");
2788 pcib = devclass_find("pcib");
2789 dev = sc->bge_dev;
2790 bus = device_get_parent(dev);
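	/*
	 * Walk up the device tree one bridge at a time, comparing each
	 * pcib-class parent against the list above, and stop as soon as a
	 * parent is not a PCI bridge or its bus is not a PCI bus.
	 */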
2791 for (;;) {
2792 dev = device_get_parent(bus);
2793 bus = device_get_parent(dev);
2794 if (device_get_devclass(dev) != pcib)
2795 break;
2796 for (i = 0; i < nitems(mbox_reorder_lists); i++) {
2797 if (pci_get_vendor(dev) ==
2798 mbox_reorder_lists[i].vendor &&
2799 pci_get_device(dev) ==
2800 mbox_reorder_lists[i].device) {
2801 device_printf(sc->bge_dev,
2802 "enabling MBOX workaround for %s\n",
2803 mbox_reorder_lists[i].desc);
2804 return (1);
2805 }
2806 }
2807 if (device_get_devclass(bus) != pci)
2808 break;
2809 }
2810 return (0);
2811}
2812
2813static void
2814bge_devinfo(struct bge_softc *sc)
2815{
2816 uint32_t cfg, clk;
2817
2818 device_printf(sc->bge_dev,
2819 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; ",
2820 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev);
2821 if (sc->bge_flags & BGE_FLAG_PCIE)
2822 printf("PCI-E\n");
2823 else if (sc->bge_flags & BGE_FLAG_PCIX) {
2824 printf("PCI-X ");
2825 cfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
2826 if (cfg == BGE_MISCCFG_BOARD_ID_5704CIOBE)
2827 clk = 133;
2828 else {
2829 clk = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
2830 switch (clk) {
2831 case 0:
2832 clk = 33;
2833 break;
2834 case 2:
2835 clk = 50;
2836 break;
2837 case 4:
2838 clk = 66;
2839 break;
2840 case 6:
2841 clk = 100;
2842 break;
2843 case 7:
2844 clk = 133;
2845 break;
2846 }
2847 }
2848 printf("%u MHz\n", clk);
2849 } else {
2850 if (sc->bge_pcixcap != 0)
2851 printf("PCI on PCI-X ");
2852 else
2853 printf("PCI ");
2854 cfg = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4);
2855 if (cfg & BGE_PCISTATE_PCI_BUSSPEED)
2856 clk = 66;
2857 else
2858 clk = 33;
2859 if (cfg & BGE_PCISTATE_32BIT_BUS)
2860 printf("%u MHz; 32bit\n", clk);
2861 else
2862 printf("%u MHz; 64bit\n", clk);
2863 }
2864}
2865
2866static int
2867bge_attach(device_t dev)
2868{
2869 struct ifnet *ifp;
2870 struct bge_softc *sc;
2871 uint32_t hwcfg = 0, misccfg;
2872 u_char eaddr[ETHER_ADDR_LEN];
2873 int capmask, error, f, msicount, phy_addr, reg, rid, trys;
2874
2875 sc = device_get_softc(dev);
2876 sc->bge_dev = dev;
2877
2878 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2879 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2880 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2881
2882 /*
2883 * Map control/status registers.
2884 */
2885 pci_enable_busmaster(dev);
2886
2887 rid = PCIR_BAR(0);
2888 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2889 RF_ACTIVE);
2890
2891 if (sc->bge_res == NULL) {
2892 device_printf (sc->bge_dev, "couldn't map memory\n");
2893 error = ENXIO;
2894 goto fail;
2895 }
2896
2897 /* Save various chip information. */
2898 sc->bge_chipid =
2899 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2900 BGE_PCIMISCCTL_ASICREV_SHIFT;
2901 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2902 /*
2903 * Find the ASIC revision. Different chips use different
2904 * registers.
2905 */
2906 switch (pci_get_device(dev)) {
2907 case BCOM_DEVICEID_BCM5717:
2908 case BCOM_DEVICEID_BCM5718:
2909 case BCOM_DEVICEID_BCM5719:
2910 case BCOM_DEVICEID_BCM5720:
2911 sc->bge_chipid = pci_read_config(dev,
2912 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2913 break;
2914 case BCOM_DEVICEID_BCM57761:
2915 case BCOM_DEVICEID_BCM57765:
2916 case BCOM_DEVICEID_BCM57781:
2917 case BCOM_DEVICEID_BCM57785:
2918 case BCOM_DEVICEID_BCM57791:
2919 case BCOM_DEVICEID_BCM57795:
2920 sc->bge_chipid = pci_read_config(dev,
2921 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2922 break;
2923 default:
2924 sc->bge_chipid = pci_read_config(dev,
2925 BGE_PCI_PRODID_ASICREV, 4);
2926 }
2927 }
2928 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2929 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2930
2931 /* Set default PHY address. */
2932 phy_addr = 1;
2933 /*
2934 * PHY address mapping for various devices.
2935 *
2936 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2937 * ---------+-------+-------+-------+-------+
2938 * BCM57XX | 1 | X | X | X |
2939 * BCM5704 | 1 | X | 1 | X |
2940 * BCM5717 | 1 | 8 | 2 | 9 |
2941 * BCM5719 | 1 | 8 | 2 | 9 |
2942 * BCM5720 | 1 | 8 | 2 | 9 |
2943 *
2944 * Other addresses may respond but they are not
2945 * IEEE compliant PHYs and should be ignored.
2946 */
2947 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2948 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2949 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2950 f = pci_get_function(dev);
2951 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2952 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2953 BGE_SGDIGSTS_IS_SERDES)
2954 phy_addr = f + 8;
2955 else
2956 phy_addr = f + 1;
2957 } else {
2958 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2959 BGE_CPMU_PHY_STRAP_IS_SERDES)
2960 phy_addr = f + 8;
2961 else
2962 phy_addr = f + 1;
2963 }
2964 }
2965
2966 /*
2967 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2968 * 5705 A0 and A1 chips.
2969 */
2970 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2971 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2972 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2973 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2974 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2975 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2976
2977 if (bge_has_eaddr(sc))
2978 sc->bge_flags |= BGE_FLAG_EADDR;
2979
2980 /* Save chipset family. */
2981 switch (sc->bge_asicrev) {
2982 case BGE_ASICREV_BCM5717:
2983 case BGE_ASICREV_BCM5719:
2984 case BGE_ASICREV_BCM5720:
2985 case BGE_ASICREV_BCM57765:
2986 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2987 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2988 BGE_FLAG_JUMBO_FRAME;
2989 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2990 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2991 /* Jumbo frame on BCM5719 A0 does not work. */
2992 sc->bge_flags &= ~BGE_FLAG_JUMBO;
2993 }
2994 break;
2995 case BGE_ASICREV_BCM5755:
2996 case BGE_ASICREV_BCM5761:
2997 case BGE_ASICREV_BCM5784:
2998 case BGE_ASICREV_BCM5785:
2999 case BGE_ASICREV_BCM5787:
3000 case BGE_ASICREV_BCM57780:
3001 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
3002 BGE_FLAG_5705_PLUS;
3003 break;
3004 case BGE_ASICREV_BCM5700:
3005 case BGE_ASICREV_BCM5701:
3006 case BGE_ASICREV_BCM5703:
3007 case BGE_ASICREV_BCM5704:
3008 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
3009 break;
3010 case BGE_ASICREV_BCM5714_A0:
3011 case BGE_ASICREV_BCM5780:
3012 case BGE_ASICREV_BCM5714:
3013 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
3014 /* FALLTHROUGH */
3015 case BGE_ASICREV_BCM5750:
3016 case BGE_ASICREV_BCM5752:
3017 case BGE_ASICREV_BCM5906:
3018 sc->bge_flags |= BGE_FLAG_575X_PLUS;
3019 /* FALLTHROUGH */
3020 case BGE_ASICREV_BCM5705:
3021 sc->bge_flags |= BGE_FLAG_5705_PLUS;
3022 break;
3023 }
3024
3025 /* Add SYSCTLs, requires the chipset family to be set. */
3026 bge_add_sysctls(sc);
3027
3028 /* Set various PHY bug flags. */
3029 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
3030 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
3031 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
3032 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
3033 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
3034 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
3035 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
3036 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
3037 if (pci_get_subvendor(dev) == DELL_VENDORID)
3038 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
3039 if ((BGE_IS_5705_PLUS(sc)) &&
3040 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
3041 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
3042 sc->bge_asicrev != BGE_ASICREV_BCM5719 &&
3043 sc->bge_asicrev != BGE_ASICREV_BCM5720 &&
3044 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
3045 sc->bge_asicrev != BGE_ASICREV_BCM57765 &&
3046 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
3047 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
3048 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
3049 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
3050 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
3051 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
3052 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
3053 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
3054 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
3055 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
3056 } else
3057 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
3058 }
3059
3060 /* Identify the chips that use a CPMU. */
3061 if (BGE_IS_5717_PLUS(sc) ||
3062 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
3063 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
3064 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
3065 sc->bge_asicrev == BGE_ASICREV_BCM57780)
3066 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
3067 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
3068 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
3069 else
3070 sc->bge_mi_mode = BGE_MIMODE_BASE;
3071 /* Enable auto polling for BCM570[0-5]. */
3072 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
3073 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
3074
3075 /*
3076 * All Broadcom controllers have the 4GB boundary DMA bug.
3077 * Whenever an address crosses a multiple of the 4GB boundary
3078 * (4GB, 8GB, 12GB, etc.) and makes the transition
3079 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
3080 * state machine will lock up and cause the device to hang.
3081 */
3082 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
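	/*
	 * bge_dma_alloc() checks this flag and, on PCI-X devices, restricts
	 * the buffer DMA tag to a 32-bit address space as a workaround.
	 */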
3083
3084 /* BCM5755 or higher and BCM5906 have short DMA bug. */
3085 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
3086 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
3087
3088 /*
3089 * BCM5719 cannot handle DMA requests for DMA segments that
3090 * are larger than 4KB in size. However, the maximum DMA
3091 * segment size created in the DMA tag is 4KB for TSO, so we
3092 * shouldn't encounter the issue here.
3093 */
3094 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
3095 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
3096
3097 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
3098 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
3099 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
3100 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
3101 sc->bge_flags |= BGE_FLAG_5788;
3102 }
3103
3104 capmask = BMSR_DEFCAPMASK;
3105 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
3106 (misccfg == 0x4000 || misccfg == 0x8000)) ||
3107 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
3108 pci_get_vendor(dev) == BCOM_VENDORID &&
3109 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
3110 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
3111 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
3112 (pci_get_vendor(dev) == BCOM_VENDORID &&
3113 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
3114 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
3115 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
3116 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
3117 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3118 /* These chips are 10/100 only. */
3119 capmask &= ~BMSR_EXTSTAT;
3120 }
3121
3122 /*
3123 * Some controllers seem to require a special firmware to use
3124 * TSO. But the firmware is not available to FreeBSD, and Linux
3125 * claims that the TSO performed by the firmware is slower than
3126 * hardware based TSO. Moreover, the firmware based TSO has one
3127 * known bug: it can't handle TSO if the ethernet header plus
3128 * IP/TCP header is greater than 80 bytes. A workaround for the
3129 * TSO bug exists, but it seems more expensive than not using
3130 * TSO at all. Some hardware also has the TSO bug, so limit
3131 * TSO to the controllers that are not affected by TSO issues
3132 * (e.g. 5755 or higher).
3133 */
3134 if (BGE_IS_5717_PLUS(sc)) {
3135 /* BCM5717 requires different TSO configuration. */
3136 sc->bge_flags |= BGE_FLAG_TSO3;
3137 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
3138 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3139 /* TSO on BCM5719 A0 does not work. */
3140 sc->bge_flags &= ~BGE_FLAG_TSO3;
3141 }
3142 } else if (BGE_IS_5755_PLUS(sc)) {
3143 /*
3144 * BCM5754 and BCM5787 share the same ASIC ID, so an
3145 * explicit device ID check is required.
3146 * For an unknown reason, TSO does not work on BCM5755M.
3147 */
3148 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
3149 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
3150 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
3151 sc->bge_flags |= BGE_FLAG_TSO;
3152 }
3153
3154 /*
3155 * Check if this is a PCI-X or PCI Express device.
3156 */
3157 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
3158 /*
3159 * Found a PCI Express capabilities register, so this
3160 * must be a PCI Express device.
3161 */
3162 sc->bge_flags |= BGE_FLAG_PCIE;
3163 sc->bge_expcap = reg;
1473 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1474 if (BGE_IS_5714_FAMILY(sc)) {
1475 /* 256 bytes for read and write. */
1476 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1477 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1478 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1479 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1480 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1481 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1482 /*
1483 * In the BCM5703, the DMA read watermark should
1484 * be set to less than or equal to the maximum
1485 * memory read byte count of the PCI-X command
1486 * register.
1487 */
1488 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1489 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1490 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1491 /* 1536 bytes for read, 384 bytes for write. */
1492 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1493 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1494 } else {
1495 /* 384 bytes for read and write. */
1496 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1497 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1498 0x0F;
1499 }
1500 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1501 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1502 uint32_t tmp;
1503
1504 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1505 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1506 if (tmp == 6 || tmp == 7)
1507 dma_rw_ctl |=
1508 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1509
1510 /* Set PCI-X DMA write workaround. */
1511 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1512 }
1513 } else {
1514 /* Conventional PCI bus: 256 bytes for read and write. */
1515 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1516 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1517
1518 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1519 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1520 dma_rw_ctl |= 0x0F;
1521 }
1522 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1523 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1524 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1525 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1526 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1527 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1528 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1529 if (BGE_IS_5717_PLUS(sc)) {
1530 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1531 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1532 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1533 /*
1534 * Enable HW workaround for controllers that misinterpret
1535 * a status tag update and leave interrupts permanently
1536 * disabled.
1537 */
1538 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1539 sc->bge_asicrev != BGE_ASICREV_BCM57765)
1540 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1541 }
1542 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1543
1544 /*
1545 * Set up general mode register.
1546 */
1547 mode_ctl = bge_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
1548 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
1549
1550 /*
1551 * BCM5701 B5 has a bug causing data corruption when using
1552 * 64-bit DMA reads, which can be terminated early and then
1553 * completed later as 32-bit accesses, in combination with
1554 * certain bridges.
1555 */
1556 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1557 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1558 mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1559
1560 /*
1561 * Tell the firmware the driver is running
1562 */
1563 if (sc->bge_asf_mode & ASF_STACKUP)
1564 mode_ctl |= BGE_MODECTL_STACKUP;
1565
1566 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1567
1568 /*
1569 * Disable memory write invalidate. Apparently it is not supported
1570 * properly by these devices. Also ensure that INTx isn't disabled,
1571 * as these chips need it even when using MSI.
1572 */
1573 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1574 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1575
1576 /* Set the timer prescaler (always 66MHz) */
1577 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1578
1579 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1580 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1581 DELAY(40); /* XXX */
1582
1583 /* Put PHY into ready state */
1584 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1585 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1586 DELAY(40);
1587 }
1588
1589 return (0);
1590}
1591
1592static int
1593bge_blockinit(struct bge_softc *sc)
1594{
1595 struct bge_rcb *rcb;
1596 bus_size_t vrcb;
1597 bge_hostaddr taddr;
1598 uint32_t dmactl, val;
1599 int i, limit;
1600
1601 /*
1602 * Initialize the memory window pointer register so that
1603 * we can access the first 32K of internal NIC RAM. This will
1604 * allow us to set up the TX send ring RCBs and the RX return
1605 * ring RCBs, plus other things which live in NIC memory.
1606 */
1607 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1608
1609 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1610
1611 if (!(BGE_IS_5705_PLUS(sc))) {
1612 /* Configure mbuf memory pool */
1613 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1614 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1615 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1616 else
1617 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1618
1619 /* Configure DMA resource pool */
1620 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1621 BGE_DMA_DESCRIPTORS);
1622 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1623 }
1624
1625 /* Configure mbuf pool watermarks */
1626 if (BGE_IS_5717_PLUS(sc)) {
1627 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1628 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1629 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1630 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1631 } else {
1632 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1633 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1634 }
1635 } else if (!BGE_IS_5705_PLUS(sc)) {
1636 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1637 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1638 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1639 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1640 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1641 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1642 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1643 } else {
1644 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1645 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1646 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1647 }
1648
1649 /* Configure DMA resource watermarks */
1650 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1651 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1652
1653 /* Enable buffer manager */
1654 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1655 /*
1656 * Change the arbitration algorithm for TXMBUF read requests to
1657 * round-robin instead of priority based for BCM5719. When the
1658 * TXFIFO is almost empty, RDMA will hold its request until the
1659 * TXFIFO is not almost empty.
1660 */
1661 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
1662 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1663 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1664
1665 /* Poll for buffer manager start indication */
1666 for (i = 0; i < BGE_TIMEOUT; i++) {
1667 DELAY(10);
1668 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1669 break;
1670 }
1671
1672 if (i == BGE_TIMEOUT) {
1673 device_printf(sc->bge_dev, "buffer manager failed to start\n");
1674 return (ENXIO);
1675 }
1676
1677 /* Enable flow-through queues */
1678 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1679 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1680
1681 /* Wait until queue initialization is complete */
1682 for (i = 0; i < BGE_TIMEOUT; i++) {
1683 DELAY(10);
1684 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1685 break;
1686 }
1687
1688 if (i == BGE_TIMEOUT) {
1689 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1690 return (ENXIO);
1691 }
1692
1693 /*
1694 * Summary of rings supported by the controller:
1695 *
1696 * Standard Receive Producer Ring
1697 * - This ring is used to feed receive buffers for "standard"
1698 * sized frames (typically 1536 bytes) to the controller.
1699 *
1700 * Jumbo Receive Producer Ring
1701 * - This ring is used to feed receive buffers for jumbo sized
1702 * frames (i.e. anything bigger than the "standard" frames)
1703 * to the controller.
1704 *
1705 * Mini Receive Producer Ring
1706 * - This ring is used to feed receive buffers for "mini"
1707 * sized frames to the controller.
1708 * - This feature required external memory for the controller
1709 * but was never used in a production system. Should always
1710 * be disabled.
1711 *
1712 * Receive Return Ring
1713 * - After the controller has placed an incoming frame into a
1714 * receive buffer, that buffer is moved into a receive return
1715 * ring. The driver is then responsible for passing the
1716 * buffer up to the stack. Many versions of the controller
1717 * support multiple RR rings.
1718 *
1719 * Send Ring
1720 * - This ring is used for outgoing frames. Many versions of
1721 * the controller support multiple send rings.
1722 */
1723
1724 /* Initialize the standard receive producer ring control block. */
1725 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1726 rcb->bge_hostaddr.bge_addr_lo =
1727 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1728 rcb->bge_hostaddr.bge_addr_hi =
1729 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1730 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1731 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1732 if (BGE_IS_5717_PLUS(sc)) {
1733 /*
1734 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1735 * Bits 15-2 : Maximum RX frame size
1736 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1737 * Bit 0 : Reserved
1738 */
1739 rcb->bge_maxlen_flags =
1740 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1741 } else if (BGE_IS_5705_PLUS(sc)) {
1742 /*
1743 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1744 * Bits 15-2 : Reserved (should be 0)
1745 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1746 * Bit 0 : Reserved
1747 */
1748 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1749 } else {
1750 /*
1751 * Ring size is always XXX entries
1752 * Bits 31-16: Maximum RX frame size
1753 * Bits 15-2 : Reserved (should be 0)
1754 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1755 * Bit 0 : Reserved
1756 */
1757 rcb->bge_maxlen_flags =
1758 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1759 }
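	/*
	 * In each case the value is built with BGE_RCB_MAXLEN_FLAGS(), which
	 * packs the ring-size/frame-length field into the upper 16 bits and
	 * the flag bits into the lower 16 bits (see its definition in
	 * if_bgereg.h), matching the layouts described above.
	 */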
1760 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1761 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1762 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1763 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1764 else
1765 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1766 /* Write the standard receive producer ring control block. */
1767 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1768 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1769 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1770 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1771
1772 /* Reset the standard receive producer ring producer index. */
1773 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1774
1775 /*
1776 * Initialize the jumbo RX producer ring control
1777 * block. We set the 'ring disabled' bit in the
1778 * flags field until we're actually ready to start
1779 * using this ring (i.e. once we set the MTU
1780 * high enough to require it).
1781 */
1782 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1783 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1784 /* Get the jumbo receive producer ring RCB parameters. */
1785 rcb->bge_hostaddr.bge_addr_lo =
1786 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1787 rcb->bge_hostaddr.bge_addr_hi =
1788 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1789 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1790 sc->bge_cdata.bge_rx_jumbo_ring_map,
1791 BUS_DMASYNC_PREREAD);
1792 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1793 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1794 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1795 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1796 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1797 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1798 else
1799 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1800 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1801 rcb->bge_hostaddr.bge_addr_hi);
1802 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1803 rcb->bge_hostaddr.bge_addr_lo);
1804 /* Program the jumbo receive producer ring RCB parameters. */
1805 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1806 rcb->bge_maxlen_flags);
1807 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1808 /* Reset the jumbo receive producer ring producer index. */
1809 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1810 }
1811
1812 /* Disable the mini receive producer ring RCB. */
1813 if (BGE_IS_5700_FAMILY(sc)) {
1814 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1815 rcb->bge_maxlen_flags =
1816 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1817 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1818 rcb->bge_maxlen_flags);
1819 /* Reset the mini receive producer ring producer index. */
1820 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1821 }
1822
1823 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1824 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1825 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1826 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1827 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1828 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1829 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1830 }
1831 /*
1832 * The BD ring replenish thresholds control how often the
1833 * hardware fetches new BD's from the producer rings in host
1834 * memory. Setting the value too low on a busy system can
1835 * starve the hardware and reduce the throughput.
1836 *
1837 * Set the BD ring replenish thresholds. The recommended
1838 * values are 1/8th the number of descriptors allocated to
1839 * each ring.
1840 * XXX The 5754 requires a lower threshold, so it might be a
1841 * requirement of all 575x family chips. The Linux driver sets
1842 * the lower threshold for all 5705 family chips as well, but there
1843 * are reports that it might not need to be so strict.
1844 *
1845 * XXX Linux does some extra fiddling here for the 5906 parts as
1846 * well.
1847 */
1848 if (BGE_IS_5705_PLUS(sc))
1849 val = 8;
1850 else
1851 val = BGE_STD_RX_RING_CNT / 8;
1852 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1853 if (BGE_IS_JUMBO_CAPABLE(sc))
1854 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1855 BGE_JUMBO_RX_RING_CNT/8);
1856 if (BGE_IS_5717_PLUS(sc)) {
1857 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1858 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1859 }
1860
1861 /*
1862 * Disable all send rings by setting the 'ring disabled' bit
1863 * in the flags field of all the TX send ring control blocks,
1864 * located in NIC memory.
1865 */
1866 if (!BGE_IS_5705_PLUS(sc))
1867 /* 5700 to 5704 had 16 send rings. */
1868 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1869 else
1870 limit = 1;
1871 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1872 for (i = 0; i < limit; i++) {
1873 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1874 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1875 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1876 vrcb += sizeof(struct bge_rcb);
1877 }
1878
1879 /* Configure send ring RCB 0 (we use only the first ring) */
1880 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1881 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1882 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1883 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1884 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1885 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1886 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1887 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1888 else
1889 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1890 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1891 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1892 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1893
1894 /*
1895 * Disable all receive return rings by setting the
1896	 * 'ring disabled' bit in the flags field of all the receive
1897 * return ring control blocks, located in NIC memory.
1898 */
1899 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1900 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1901 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
1902 /* Should be 17, use 16 until we get an SRAM map. */
1903 limit = 16;
1904 } else if (!BGE_IS_5705_PLUS(sc))
1905 limit = BGE_RX_RINGS_MAX;
1906 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1907 sc->bge_asicrev == BGE_ASICREV_BCM57765)
1908 limit = 4;
1909 else
1910 limit = 1;
1911 /* Disable all receive return rings. */
1912 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1913 for (i = 0; i < limit; i++) {
1914 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1915 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1916 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1917 BGE_RCB_FLAG_RING_DISABLED);
1918 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1919 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1920 (i * (sizeof(uint64_t))), 0);
1921 vrcb += sizeof(struct bge_rcb);
1922 }
1923
1924 /*
1925 * Set up receive return ring 0. Note that the NIC address
1926 * for RX return rings is 0x0. The return rings live entirely
1927 * within the host, so the nicaddr field in the RCB isn't used.
1928 */
1929 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1930 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1931 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1932 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1933 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1934 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1935 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1936
1937 /* Set random backoff seed for TX */
1938 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1939 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1940 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1941 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1942 BGE_TX_BACKOFF_SEED_MASK);
1943
1944 /* Set inter-packet gap */
1945 val = 0x2620;
1946 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
1947 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
1948 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
1949 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1950
1951 /*
1952 * Specify which ring to use for packets that don't match
1953 * any RX rules.
1954 */
1955 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1956
1957 /*
1958 * Configure number of RX lists. One interrupt distribution
1959 * list, sixteen active lists, one bad frames class.
1960 */
1961 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1962
1963	/* Initialize RX list placement stats mask. */
1964 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1965 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1966
1967 /* Disable host coalescing until we get it set up */
1968 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1969
1970 /* Poll to make sure it's shut down. */
1971 for (i = 0; i < BGE_TIMEOUT; i++) {
1972 DELAY(10);
1973 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1974 break;
1975 }
1976
1977 if (i == BGE_TIMEOUT) {
1978 device_printf(sc->bge_dev,
1979 "host coalescing engine failed to idle\n");
1980 return (ENXIO);
1981 }
1982
1983 /* Set up host coalescing defaults */
1984 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1985 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1986 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1987 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1988 if (!(BGE_IS_5705_PLUS(sc))) {
1989 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1990 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1991 }
1992 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1993 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1994
1995 /* Set up address of statistics block */
1996 if (!(BGE_IS_5705_PLUS(sc))) {
1997 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1998 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1999 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
2000 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
2001 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2002 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2003 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2004 }
2005
2006 /* Set up address of status block */
2007 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
2008 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
2009 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
2010 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
2011
2012 /* Set up status block size. */
2013 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2014 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
2015 val = BGE_STATBLKSZ_FULL;
2016 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2017 } else {
2018 val = BGE_STATBLKSZ_32BYTE;
2019 bzero(sc->bge_ldata.bge_status_block, 32);
2020 }
2021 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2022 sc->bge_cdata.bge_status_map,
2023 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2024
2025 /* Turn on host coalescing state machine */
2026 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2027
2028 /* Turn on RX BD completion state machine and enable attentions */
2029 CSR_WRITE_4(sc, BGE_RBDC_MODE,
2030 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
2031
2032 /* Turn on RX list placement state machine */
2033 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2034
2035 /* Turn on RX list selector state machine. */
2036 if (!(BGE_IS_5705_PLUS(sc)))
2037 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2038
2039 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2040 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2041 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2042 BGE_MACMODE_FRMHDR_DMA_ENB;
2043
2044 if (sc->bge_flags & BGE_FLAG_TBI)
2045 val |= BGE_PORTMODE_TBI;
2046 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
2047 val |= BGE_PORTMODE_GMII;
2048 else
2049 val |= BGE_PORTMODE_MII;
2050
2051 /* Turn on DMA, clear stats */
2052 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2053 DELAY(40);
2054
2055 /* Set misc. local control, enable interrupts on attentions */
2056 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2057
2058#ifdef notdef
2059 /* Assert GPIO pins for PHY reset */
2060 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2061 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2062 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2063 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2064#endif
2065
2066 /* Turn on DMA completion state machine */
2067 if (!(BGE_IS_5705_PLUS(sc)))
2068 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2069
2070 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2071
2072 /* Enable host coalescing bug fix. */
2073 if (BGE_IS_5755_PLUS(sc))
2074 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2075
2076 /* Request larger DMA burst size to get better performance. */
2077 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2078 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2079
2080 /* Turn on write DMA state machine */
2081 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2082 DELAY(40);
2083
2084 /* Turn on read DMA state machine */
2085 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2086
2087 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2088 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2089
2090 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2091 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2092 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2093 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2094 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2095 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2096 if (sc->bge_flags & BGE_FLAG_PCIE)
2097 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2098 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2099 val |= BGE_RDMAMODE_TSO4_ENABLE;
2100 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2101 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2102 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2103 val |= BGE_RDMAMODE_TSO6_ENABLE;
2104 }
2105
2106 if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2107 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2108 BGE_RDMAMODE_H2BNC_VLAN_DET;
2109 /*
2110 * Allow multiple outstanding read requests from
2111 * non-LSO read DMA engine.
2112 */
2113 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2114 }
2115
2116 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2117 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2118 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2119 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2120 BGE_IS_5717_PLUS(sc)) {
2121 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
2122 /*
2123		 * Adjust the TX margin to prevent TX data corruption and
2124		 * fix internal FIFO overflow.
2125 */
2126 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2127 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2128 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2129 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2130 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2131 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2132 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2133 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2134 }
2135 /*
2136 * Enable fix for read DMA FIFO overruns.
2137 * The fix is to limit the number of RX BDs
2138		 * the hardware would fetch at a time.
2139 */
2140 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
2141 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2142 }
2143
2144 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2145 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2146 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2147 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2148 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2149 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2150 /*
2151 * Allow 4KB burst length reads for non-LSO frames.
2152 * Enable 512B burst length reads for buffer descriptors.
2153 */
2154 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2155 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2156 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2157 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2158 }
2159
2160 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2161 DELAY(40);
2162
2163 /* Turn on RX data completion state machine */
2164 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2165
2166 /* Turn on RX BD initiator state machine */
2167 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2168
2169 /* Turn on RX data and RX BD initiator state machine */
2170 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2171
2172 /* Turn on Mbuf cluster free state machine */
2173 if (!(BGE_IS_5705_PLUS(sc)))
2174 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2175
2176 /* Turn on send BD completion state machine */
2177 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2178
2179 /* Turn on send data completion state machine */
2180 val = BGE_SDCMODE_ENABLE;
2181 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2182 val |= BGE_SDCMODE_CDELAY;
2183 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2184
2185 /* Turn on send data initiator state machine */
2186 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2187 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2188 BGE_SDIMODE_HW_LSO_PRE_DMA);
2189 else
2190 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2191
2192 /* Turn on send BD initiator state machine */
2193 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2194
2195 /* Turn on send BD selector state machine */
2196 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2197
2198 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2199 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2200 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2201
2202 /* ack/clear link change events */
2203 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2204 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2205 BGE_MACSTAT_LINK_CHANGED);
2206 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2207
2208 /*
2209 * Enable attention when the link has changed state for
2210 * devices that use auto polling.
2211 */
2212 if (sc->bge_flags & BGE_FLAG_TBI) {
2213 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2214 } else {
2215 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2216 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2217 DELAY(80);
2218 }
2219 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2220 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2221 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2222 BGE_EVTENB_MI_INTERRUPT);
2223 }
2224
2225 /*
2226 * Clear any pending link state attention.
2227 * Otherwise some link state change events may be lost until attention
2228 * is cleared by bge_intr() -> bge_link_upd() sequence.
2229 * It's not necessary on newer BCM chips - perhaps enabling link
2230 * state change attentions implies clearing pending attention.
2231 */
2232 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2233 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2234 BGE_MACSTAT_LINK_CHANGED);
2235
2236 /* Enable link state change attentions. */
2237 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2238
2239 return (0);
2240}
2241
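/*
 * Look up the name for a chip ID: try an exact chip ID match first,
 * then fall back to matching on the major (ASIC) revision.
 */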
2242const struct bge_revision *
2243bge_lookup_rev(uint32_t chipid)
2244{
2245 const struct bge_revision *br;
2246
2247 for (br = bge_revisions; br->br_name != NULL; br++) {
2248 if (br->br_chipid == chipid)
2249 return (br);
2250 }
2251
2252 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2253 if (br->br_chipid == BGE_ASICREV(chipid))
2254 return (br);
2255 }
2256
2257 return (NULL);
2258}
2259
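/*
 * Map a PCI vendor ID to a vendor name. The probe table should only
 * contain known vendors, so an unknown ID here is treated as a bug.
 */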
2260const struct bge_vendor *
2261bge_lookup_vendor(uint16_t vid)
2262{
2263 const struct bge_vendor *v;
2264
2265 for (v = bge_vendors; v->v_name != NULL; v++)
2266 if (v->v_id == vid)
2267 return (v);
2268
2269 panic("%s: unknown vendor %d", __func__, vid);
2270 return (NULL);
2271}
2272
2273/*
2274 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2275 * against our list and return its name if we find a match.
2276 *
2277 * Note that since the Broadcom controller contains VPD support, we
2278 * try to get the device name string from the controller itself instead
2279 * of the compiled-in string. It guarantees we'll always announce the
2280 * right product name. We fall back to the compiled-in string when
2281 * VPD is unavailable or corrupt.
2282 */
2283static int
2284bge_probe(device_t dev)
2285{
2286 char buf[96];
2287 char model[64];
2288 const struct bge_revision *br;
2289 const char *pname;
2290 struct bge_softc *sc = device_get_softc(dev);
2291 const struct bge_type *t = bge_devs;
2292 const struct bge_vendor *v;
2293 uint32_t id;
2294 uint16_t did, vid;
2295
2296 sc->bge_dev = dev;
2297 vid = pci_get_vendor(dev);
2298 did = pci_get_device(dev);
2299 while(t->bge_vid != 0) {
2300 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2301 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2302 BGE_PCIMISCCTL_ASICREV_SHIFT;
2303 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2304 /*
2305				 * Find the ASIC revision. Different chips
2306 * use different registers.
2307 */
2308 switch (pci_get_device(dev)) {
2309 case BCOM_DEVICEID_BCM5717:
2310 case BCOM_DEVICEID_BCM5718:
2311 case BCOM_DEVICEID_BCM5719:
2312 case BCOM_DEVICEID_BCM5720:
2313 id = pci_read_config(dev,
2314 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2315 break;
2316 case BCOM_DEVICEID_BCM57761:
2317 case BCOM_DEVICEID_BCM57765:
2318 case BCOM_DEVICEID_BCM57781:
2319 case BCOM_DEVICEID_BCM57785:
2320 case BCOM_DEVICEID_BCM57791:
2321 case BCOM_DEVICEID_BCM57795:
2322 id = pci_read_config(dev,
2323 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2324 break;
2325 default:
2326 id = pci_read_config(dev,
2327 BGE_PCI_PRODID_ASICREV, 4);
2328 }
2329 }
2330 br = bge_lookup_rev(id);
2331 v = bge_lookup_vendor(vid);
2332 if (bge_has_eaddr(sc) &&
2333 pci_get_vpd_ident(dev, &pname) == 0)
2334 snprintf(model, 64, "%s", pname);
2335 else
2336 snprintf(model, 64, "%s %s", v->v_name,
2337 br != NULL ? br->br_name :
2338 "NetXtreme Ethernet Controller");
2339 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2340 br != NULL ? "" : "unknown ", id);
2341 device_set_desc_copy(dev, buf);
2342 return (0);
2343 }
2344 t++;
2345 }
2346
2347 return (ENXIO);
2348}
2349
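/*
 * Release all DMA resources created by bge_dma_alloc(): the per-buffer
 * DMA maps and tags, the ring/block memory and maps, and finally the
 * parent tags.
 */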
2350static void
2351bge_dma_free(struct bge_softc *sc)
2352{
2353 int i;
2354
2355 /* Destroy DMA maps for RX buffers. */
2356 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2357 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2358 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2359 sc->bge_cdata.bge_rx_std_dmamap[i]);
2360 }
2361 if (sc->bge_cdata.bge_rx_std_sparemap)
2362 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2363 sc->bge_cdata.bge_rx_std_sparemap);
2364
2365 /* Destroy DMA maps for jumbo RX buffers. */
2366 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2367 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2368 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2369 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2370 }
2371 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2372 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2373 sc->bge_cdata.bge_rx_jumbo_sparemap);
2374
2375 /* Destroy DMA maps for TX buffers. */
2376 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2377 if (sc->bge_cdata.bge_tx_dmamap[i])
2378 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2379 sc->bge_cdata.bge_tx_dmamap[i]);
2380 }
2381
2382 if (sc->bge_cdata.bge_rx_mtag)
2383 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2384 if (sc->bge_cdata.bge_mtag_jumbo)
2385 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo);
2386 if (sc->bge_cdata.bge_tx_mtag)
2387 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2388
2389 /* Destroy standard RX ring. */
2390 if (sc->bge_cdata.bge_rx_std_ring_map)
2391 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2392 sc->bge_cdata.bge_rx_std_ring_map);
2393 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2394 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2395 sc->bge_ldata.bge_rx_std_ring,
2396 sc->bge_cdata.bge_rx_std_ring_map);
2397
2398 if (sc->bge_cdata.bge_rx_std_ring_tag)
2399 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2400
2401 /* Destroy jumbo RX ring. */
2402 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2403 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2404 sc->bge_cdata.bge_rx_jumbo_ring_map);
2405
2406 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2407 sc->bge_ldata.bge_rx_jumbo_ring)
2408 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2409 sc->bge_ldata.bge_rx_jumbo_ring,
2410 sc->bge_cdata.bge_rx_jumbo_ring_map);
2411
2412 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2413 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2414
2415 /* Destroy RX return ring. */
2416 if (sc->bge_cdata.bge_rx_return_ring_map)
2417 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2418 sc->bge_cdata.bge_rx_return_ring_map);
2419
2420 if (sc->bge_cdata.bge_rx_return_ring_map &&
2421 sc->bge_ldata.bge_rx_return_ring)
2422 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2423 sc->bge_ldata.bge_rx_return_ring,
2424 sc->bge_cdata.bge_rx_return_ring_map);
2425
2426 if (sc->bge_cdata.bge_rx_return_ring_tag)
2427 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2428
2429 /* Destroy TX ring. */
2430 if (sc->bge_cdata.bge_tx_ring_map)
2431 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2432 sc->bge_cdata.bge_tx_ring_map);
2433
2434 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2435 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2436 sc->bge_ldata.bge_tx_ring,
2437 sc->bge_cdata.bge_tx_ring_map);
2438
2439 if (sc->bge_cdata.bge_tx_ring_tag)
2440 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2441
2442 /* Destroy status block. */
2443 if (sc->bge_cdata.bge_status_map)
2444 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2445 sc->bge_cdata.bge_status_map);
2446
2447 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2448 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2449 sc->bge_ldata.bge_status_block,
2450 sc->bge_cdata.bge_status_map);
2451
2452 if (sc->bge_cdata.bge_status_tag)
2453 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2454
2455 /* Destroy statistics block. */
2456 if (sc->bge_cdata.bge_stats_map)
2457 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2458 sc->bge_cdata.bge_stats_map);
2459
2460 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2461 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2462 sc->bge_ldata.bge_stats,
2463 sc->bge_cdata.bge_stats_map);
2464
2465 if (sc->bge_cdata.bge_stats_tag)
2466 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2467
2468 if (sc->bge_cdata.bge_buffer_tag)
2469 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2470
2471 /* Destroy the parent tag. */
2472 if (sc->bge_cdata.bge_parent_tag)
2473 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2474}
2475
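/*
 * Helper to allocate a descriptor ring or block: create a DMA tag,
 * allocate DMA'able memory for it and load the memory to obtain its
 * bus address, which is returned in *paddr.
 */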
2476static int
2477bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2478 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2479 bus_addr_t *paddr, const char *msg)
2480{
2481 struct bge_dmamap_arg ctx;
2482 int error;
2483
2484 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2485 alignment, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2486 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2487 if (error != 0) {
2488 device_printf(sc->bge_dev,
2489 "could not create %s dma tag\n", msg);
2490 return (ENOMEM);
2491 }
2492 /* Allocate DMA'able memory for ring. */
2493 error = bus_dmamem_alloc(*tag, (void **)ring,
2494 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2495 if (error != 0) {
2496 device_printf(sc->bge_dev,
2497 "could not allocate DMA'able memory for %s\n", msg);
2498 return (ENOMEM);
2499 }
2500 /* Load the address of the ring. */
2501 ctx.bge_busaddr = 0;
2502 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2503 &ctx, BUS_DMA_NOWAIT);
2504 if (error != 0) {
2505 device_printf(sc->bge_dev,
2506 "could not load DMA'able memory for %s\n", msg);
2507 return (ENOMEM);
2508 }
2509 *paddr = ctx.bge_busaddr;
2510 return (0);
2511}
2512
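/*
 * Allocate the parent DMA tags, the rings, the status and statistics
 * blocks, and the DMA maps used for RX/TX mbufs.
 */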
2513static int
2514bge_dma_alloc(struct bge_softc *sc)
2515{
2516 bus_addr_t lowaddr;
2517 bus_size_t rxmaxsegsz, sbsz, txsegsz, txmaxsegsz;
2518 int i, error;
2519
2520 lowaddr = BUS_SPACE_MAXADDR;
2521 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2522 lowaddr = BGE_DMA_MAXADDR;
2523 /*
2524 * Allocate the parent bus DMA tag appropriate for PCI.
2525 */
2526 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2527 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2528 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2529 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2530 if (error != 0) {
2531 device_printf(sc->bge_dev,
2532 "could not allocate parent dma tag\n");
2533 return (ENOMEM);
2534 }
2535
2536 /* Create tag for standard RX ring. */
2537 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2538 &sc->bge_cdata.bge_rx_std_ring_tag,
2539 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2540 &sc->bge_cdata.bge_rx_std_ring_map,
2541 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2542 if (error)
2543 return (error);
2544
2545 /* Create tag for RX return ring. */
2546 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2547 &sc->bge_cdata.bge_rx_return_ring_tag,
2548 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2549 &sc->bge_cdata.bge_rx_return_ring_map,
2550 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2551 if (error)
2552 return (error);
2553
2554 /* Create tag for TX ring. */
2555 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2556 &sc->bge_cdata.bge_tx_ring_tag,
2557 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2558 &sc->bge_cdata.bge_tx_ring_map,
2559 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2560 if (error)
2561 return (error);
2562
2563 /*
2564 * Create tag for status block.
2565	 * Because we use only a single TX/RX/RX return ring, use the
2566	 * minimum status block size, except for BCM5700 AX/BX, which
2567	 * seem to want to see the full status block size regardless
2568	 * of the configured number of rings.
2569 */
2570 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2571 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2572 sbsz = BGE_STATUS_BLK_SZ;
2573 else
2574 sbsz = 32;
2575 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2576 &sc->bge_cdata.bge_status_tag,
2577 (uint8_t **)&sc->bge_ldata.bge_status_block,
2578 &sc->bge_cdata.bge_status_map,
2579 &sc->bge_ldata.bge_status_block_paddr, "status block");
2580 if (error)
2581 return (error);
2582
2583 /* Create tag for statistics block. */
2584 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2585 &sc->bge_cdata.bge_stats_tag,
2586 (uint8_t **)&sc->bge_ldata.bge_stats,
2587 &sc->bge_cdata.bge_stats_map,
2588 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2589 if (error)
2590 return (error);
2591
2592 /* Create tag for jumbo RX ring. */
2593 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2594 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2595 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2596 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2597 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2598 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2599 if (error)
2600 return (error);
2601 }
2602
2603 /* Create parent tag for buffers. */
2604 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2605 /*
2606 * XXX
2607		 * A watchdog timeout issue was observed on BCM5704 devices that
2608		 * live behind a PCI-X bridge (e.g. the AMD 8131 PCI-X bridge).
2609		 * Both limiting the DMA address space to 32 bits and flushing
2610		 * mailbox writes seem to address the issue.
2611 */
2612 if (sc->bge_pcixcap != 0)
2613 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2614 }
2615 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 1, 0, lowaddr,
2616 BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
2617 BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
2618 &sc->bge_cdata.bge_buffer_tag);
2619 if (error != 0) {
2620 device_printf(sc->bge_dev,
2621 "could not allocate buffer dma tag\n");
2622 return (ENOMEM);
2623 }
2624 /* Create tag for Tx mbufs. */
2625 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2626 txsegsz = BGE_TSOSEG_SZ;
2627 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2628 } else {
2629 txsegsz = MCLBYTES;
2630 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2631 }
2632 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2633 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2634 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2635 &sc->bge_cdata.bge_tx_mtag);
2636
2637 if (error) {
2638 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2639 return (ENOMEM);
2640 }
2641
2642 /* Create tag for Rx mbufs. */
2643 if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
2644 rxmaxsegsz = MJUM9BYTES;
2645 else
2646 rxmaxsegsz = MCLBYTES;
2647 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2648 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
2649 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2650
2651 if (error) {
2652 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2653 return (ENOMEM);
2654 }
2655
2656 /* Create DMA maps for RX buffers. */
2657 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2658 &sc->bge_cdata.bge_rx_std_sparemap);
2659 if (error) {
2660 device_printf(sc->bge_dev,
2661 "can't create spare DMA map for RX\n");
2662 return (ENOMEM);
2663 }
2664 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2665 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2666 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2667 if (error) {
2668 device_printf(sc->bge_dev,
2669 "can't create DMA map for RX\n");
2670 return (ENOMEM);
2671 }
2672 }
2673
2674 /* Create DMA maps for TX buffers. */
2675 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2676 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2677 &sc->bge_cdata.bge_tx_dmamap[i]);
2678 if (error) {
2679 device_printf(sc->bge_dev,
2680 "can't create DMA map for TX\n");
2681 return (ENOMEM);
2682 }
2683 }
2684
2685 /* Create tags for jumbo RX buffers. */
2686 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2687 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2688 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2689 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2690 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2691 if (error) {
2692 device_printf(sc->bge_dev,
2693 "could not allocate jumbo dma tag\n");
2694 return (ENOMEM);
2695 }
2696 /* Create DMA maps for jumbo RX buffers. */
2697 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2698 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2699 if (error) {
2700 device_printf(sc->bge_dev,
2701 "can't create spare DMA map for jumbo RX\n");
2702 return (ENOMEM);
2703 }
2704 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2705 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2706 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2707 if (error) {
2708 device_printf(sc->bge_dev,
2709 "can't create DMA map for jumbo RX\n");
2710 return (ENOMEM);
2711 }
2712 }
2713 }
2714
2715 return (0);
2716}
2717
2718/*
2719 * Return true if this device has more than one port.
2720 */
2721static int
2722bge_has_multiple_ports(struct bge_softc *sc)
2723{
2724 device_t dev = sc->bge_dev;
2725 u_int b, d, f, fscan, s;
2726
2727 d = pci_get_domain(dev);
2728 b = pci_get_bus(dev);
2729 s = pci_get_slot(dev);
2730 f = pci_get_function(dev);
2731 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2732 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2733 return (1);
2734 return (0);
2735}
2736
2737/*
2738 * Return true if MSI can be used with this device.
2739 */
2740static int
2741bge_can_use_msi(struct bge_softc *sc)
2742{
2743 int can_use_msi = 0;
2744
2745 if (sc->bge_msi == 0)
2746 return (0);
2747
2748 /* Disable MSI for polling(4). */
2749#ifdef DEVICE_POLLING
2750 return (0);
2751#endif
2752 switch (sc->bge_asicrev) {
2753 case BGE_ASICREV_BCM5714_A0:
2754 case BGE_ASICREV_BCM5714:
2755 /*
2756 * Apparently, MSI doesn't work when these chips are
2757 * configured in single-port mode.
2758 */
2759 if (bge_has_multiple_ports(sc))
2760 can_use_msi = 1;
2761 break;
2762 case BGE_ASICREV_BCM5750:
2763 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2764 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2765 can_use_msi = 1;
2766 break;
2767 default:
2768 if (BGE_IS_575X_PLUS(sc))
2769 can_use_msi = 1;
2770 }
2771 return (can_use_msi);
2772}
2773
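/*
 * Return true if the controller sits behind a PCI bridge that is known
 * to reorder mailbox register writes, in which case the mailbox write
 * workaround must be enabled.
 */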
2774static int
2775bge_mbox_reorder(struct bge_softc *sc)
2776{
2777 /* Lists of PCI bridges that are known to reorder mailbox writes. */
2778 static const struct mbox_reorder {
2779 const uint16_t vendor;
2780 const uint16_t device;
2781 const char *desc;
2782 } const mbox_reorder_lists[] = {
2783 { 0x1022, 0x7450, "AMD-8131 PCI-X Bridge" },
2784 };
2785 devclass_t pci, pcib;
2786 device_t bus, dev;
2787 int i;
2788
2789 pci = devclass_find("pci");
2790 pcib = devclass_find("pcib");
2791 dev = sc->bge_dev;
2792 bus = device_get_parent(dev);
2793 for (;;) {
2794 dev = device_get_parent(bus);
2795 bus = device_get_parent(dev);
2796 if (device_get_devclass(dev) != pcib)
2797 break;
2798 for (i = 0; i < nitems(mbox_reorder_lists); i++) {
2799 if (pci_get_vendor(dev) ==
2800 mbox_reorder_lists[i].vendor &&
2801 pci_get_device(dev) ==
2802 mbox_reorder_lists[i].device) {
2803 device_printf(sc->bge_dev,
2804 "enabling MBOX workaround for %s\n",
2805 mbox_reorder_lists[i].desc);
2806 return (1);
2807 }
2808 }
2809 if (device_get_devclass(bus) != pci)
2810 break;
2811 }
2812 return (0);
2813}
2814
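/*
 * Print a one-line summary of the controller: chip ID, ASIC and chip
 * revisions, and the bus type/speed/width it is attached to.
 */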
2815static void
2816bge_devinfo(struct bge_softc *sc)
2817{
2818 uint32_t cfg, clk;
2819
2820 device_printf(sc->bge_dev,
2821 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; ",
2822 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev);
2823 if (sc->bge_flags & BGE_FLAG_PCIE)
2824 printf("PCI-E\n");
2825 else if (sc->bge_flags & BGE_FLAG_PCIX) {
2826 printf("PCI-X ");
2827 cfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
2828 if (cfg == BGE_MISCCFG_BOARD_ID_5704CIOBE)
2829 clk = 133;
2830 else {
2831 clk = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
2832 switch (clk) {
2833 case 0:
2834 clk = 33;
2835 break;
2836 case 2:
2837 clk = 50;
2838 break;
2839 case 4:
2840 clk = 66;
2841 break;
2842 case 6:
2843 clk = 100;
2844 break;
2845 case 7:
2846 clk = 133;
2847 break;
2848 }
2849 }
2850 printf("%u MHz\n", clk);
2851 } else {
2852 if (sc->bge_pcixcap != 0)
2853 printf("PCI on PCI-X ");
2854 else
2855 printf("PCI ");
2856 cfg = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4);
2857 if (cfg & BGE_PCISTATE_PCI_BUSSPEED)
2858 clk = 66;
2859 else
2860 clk = 33;
2861 if (cfg & BGE_PCISTATE_32BIT_BUS)
2862 printf("%u MHz; 32bit\n", clk);
2863 else
2864 printf("%u MHz; 64bit\n", clk);
2865 }
2866}
2867
2868static int
2869bge_attach(device_t dev)
2870{
2871 struct ifnet *ifp;
2872 struct bge_softc *sc;
2873 uint32_t hwcfg = 0, misccfg;
2874 u_char eaddr[ETHER_ADDR_LEN];
2875 int capmask, error, f, msicount, phy_addr, reg, rid, trys;
2876
2877 sc = device_get_softc(dev);
2878 sc->bge_dev = dev;
2879
2880 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2881 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2882 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2883
2884 /*
2885 * Map control/status registers.
2886 */
2887 pci_enable_busmaster(dev);
2888
2889 rid = PCIR_BAR(0);
2890 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2891 RF_ACTIVE);
2892
2893 if (sc->bge_res == NULL) {
2894 device_printf (sc->bge_dev, "couldn't map memory\n");
2895 error = ENXIO;
2896 goto fail;
2897 }
2898
2899 /* Save various chip information. */
2900 sc->bge_chipid =
2901 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2902 BGE_PCIMISCCTL_ASICREV_SHIFT;
2903 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2904 /*
2905		 * Find the ASIC revision. Different chips use different
2906 * registers.
2907 */
2908 switch (pci_get_device(dev)) {
2909 case BCOM_DEVICEID_BCM5717:
2910 case BCOM_DEVICEID_BCM5718:
2911 case BCOM_DEVICEID_BCM5719:
2912 case BCOM_DEVICEID_BCM5720:
2913 sc->bge_chipid = pci_read_config(dev,
2914 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2915 break;
2916 case BCOM_DEVICEID_BCM57761:
2917 case BCOM_DEVICEID_BCM57765:
2918 case BCOM_DEVICEID_BCM57781:
2919 case BCOM_DEVICEID_BCM57785:
2920 case BCOM_DEVICEID_BCM57791:
2921 case BCOM_DEVICEID_BCM57795:
2922 sc->bge_chipid = pci_read_config(dev,
2923 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2924 break;
2925 default:
2926 sc->bge_chipid = pci_read_config(dev,
2927 BGE_PCI_PRODID_ASICREV, 4);
2928 }
2929 }
2930 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2931 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2932
2933 /* Set default PHY address. */
2934 phy_addr = 1;
2935 /*
2936 * PHY address mapping for various devices.
2937 *
2938 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2939 * ---------+-------+-------+-------+-------+
2940 * BCM57XX | 1 | X | X | X |
2941 * BCM5704 | 1 | X | 1 | X |
2942 * BCM5717 | 1 | 8 | 2 | 9 |
2943 * BCM5719 | 1 | 8 | 2 | 9 |
2944 * BCM5720 | 1 | 8 | 2 | 9 |
2945 *
2946 * Other addresses may respond but they are not
2947 * IEEE compliant PHYs and should be ignored.
2948 */
2949 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2950 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2951 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2952 f = pci_get_function(dev);
2953 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2954 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2955 BGE_SGDIGSTS_IS_SERDES)
2956 phy_addr = f + 8;
2957 else
2958 phy_addr = f + 1;
2959 } else {
2960 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2961 BGE_CPMU_PHY_STRAP_IS_SERDES)
2962 phy_addr = f + 8;
2963 else
2964 phy_addr = f + 1;
2965 }
2966 }
2967
2968 /*
2969 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2970 * 5705 A0 and A1 chips.
2971 */
2972 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2973 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2974 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2975 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2976 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2977 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2978
2979 if (bge_has_eaddr(sc))
2980 sc->bge_flags |= BGE_FLAG_EADDR;
2981
2982 /* Save chipset family. */
2983 switch (sc->bge_asicrev) {
2984 case BGE_ASICREV_BCM5717:
2985 case BGE_ASICREV_BCM5719:
2986 case BGE_ASICREV_BCM5720:
2987 case BGE_ASICREV_BCM57765:
2988 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2989 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2990 BGE_FLAG_JUMBO_FRAME;
2991 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2992 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2993 /* Jumbo frame on BCM5719 A0 does not work. */
2994 sc->bge_flags &= ~BGE_FLAG_JUMBO;
2995 }
2996 break;
2997 case BGE_ASICREV_BCM5755:
2998 case BGE_ASICREV_BCM5761:
2999 case BGE_ASICREV_BCM5784:
3000 case BGE_ASICREV_BCM5785:
3001 case BGE_ASICREV_BCM5787:
3002 case BGE_ASICREV_BCM57780:
3003 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
3004 BGE_FLAG_5705_PLUS;
3005 break;
3006 case BGE_ASICREV_BCM5700:
3007 case BGE_ASICREV_BCM5701:
3008 case BGE_ASICREV_BCM5703:
3009 case BGE_ASICREV_BCM5704:
3010 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
3011 break;
3012 case BGE_ASICREV_BCM5714_A0:
3013 case BGE_ASICREV_BCM5780:
3014 case BGE_ASICREV_BCM5714:
3015 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
3016 /* FALLTHROUGH */
3017 case BGE_ASICREV_BCM5750:
3018 case BGE_ASICREV_BCM5752:
3019 case BGE_ASICREV_BCM5906:
3020 sc->bge_flags |= BGE_FLAG_575X_PLUS;
3021 /* FALLTHROUGH */
3022 case BGE_ASICREV_BCM5705:
3023 sc->bge_flags |= BGE_FLAG_5705_PLUS;
3024 break;
3025 }
3026
3027 /* Add SYSCTLs, requires the chipset family to be set. */
3028 bge_add_sysctls(sc);
3029
3030 /* Set various PHY bug flags. */
3031 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
3032 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
3033 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
3034 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
3035 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
3036 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
3037 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
3038 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
3039 if (pci_get_subvendor(dev) == DELL_VENDORID)
3040 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
3041 if ((BGE_IS_5705_PLUS(sc)) &&
3042 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
3043 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
3044 sc->bge_asicrev != BGE_ASICREV_BCM5719 &&
3045 sc->bge_asicrev != BGE_ASICREV_BCM5720 &&
3046 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
3047 sc->bge_asicrev != BGE_ASICREV_BCM57765 &&
3048 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
3049 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
3050 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
3051 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
3052 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
3053 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
3054 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
3055 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
3056 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
3057 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
3058 } else
3059 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
3060 }
3061
3062	/* Identify the chips that use a CPMU. */
3063 if (BGE_IS_5717_PLUS(sc) ||
3064 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
3065 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
3066 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
3067 sc->bge_asicrev == BGE_ASICREV_BCM57780)
3068 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
3069 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
3070 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
3071 else
3072 sc->bge_mi_mode = BGE_MIMODE_BASE;
3073 /* Enable auto polling for BCM570[0-5]. */
3074 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
3075 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
3076
3077 /*
3078	 * All Broadcom controllers have the 4GB boundary DMA bug.
3079	 * Whenever an address crosses a multiple of the 4GB boundary
3080	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
3081	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
3082	 * state machine will lock up and cause the device to hang.
3083 */
3084 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
3085
3086 /* BCM5755 or higher and BCM5906 have short DMA bug. */
3087 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
3088 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
3089
3090 /*
3091	 * The BCM5719 cannot handle DMA requests for DMA segments that
3092	 * are larger than 4KB. However, the maximum DMA segment size
3093	 * created in the DMA tag is 4KB for TSO, so we would not
3094	 * encounter the issue here.
3095 */
3096 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
3097 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
3098
3099 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
3100 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
3101 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
3102 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
3103 sc->bge_flags |= BGE_FLAG_5788;
3104 }
3105
3106 capmask = BMSR_DEFCAPMASK;
3107 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
3108 (misccfg == 0x4000 || misccfg == 0x8000)) ||
3109 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
3110 pci_get_vendor(dev) == BCOM_VENDORID &&
3111 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
3112 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
3113 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
3114 (pci_get_vendor(dev) == BCOM_VENDORID &&
3115 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
3116 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
3117 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
3118 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
3119 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3120 /* These chips are 10/100 only. */
3121 capmask &= ~BMSR_EXTSTAT;
3122 }
3123
3124 /*
3125	 * Some controllers seem to require special firmware to use
3126	 * TSO. That firmware is not available to FreeBSD, and Linux
3127	 * claims that TSO performed by the firmware is slower than
3128	 * hardware-based TSO. Moreover, firmware-based TSO has a
3129	 * known bug: it cannot handle TSO if the Ethernet header plus
3130	 * IP/TCP header is greater than 80 bytes. A workaround for the
3131	 * bug exists, but it appears to be more expensive than not
3132	 * using TSO at all. Some hardware also has the TSO bug, so
3133	 * limit TSO to the controllers that are not affected by TSO
3134	 * issues (e.g. 5755 or higher).
3135 */
3136 if (BGE_IS_5717_PLUS(sc)) {
3137 /* BCM5717 requires different TSO configuration. */
3138 sc->bge_flags |= BGE_FLAG_TSO3;
3139 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
3140 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3141 /* TSO on BCM5719 A0 does not work. */
3142 sc->bge_flags &= ~BGE_FLAG_TSO3;
3143 }
3144 } else if (BGE_IS_5755_PLUS(sc)) {
3145 /*
3146		 * BCM5754 and BCM5787 share the same ASIC ID, so an
3147		 * explicit device ID check is required.
3148		 * For unknown reasons, TSO does not work on the BCM5755M.
3149 */
3150 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
3151 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
3152 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
3153 sc->bge_flags |= BGE_FLAG_TSO;
3154 }
3155
3156 /*
3157 * Check if this is a PCI-X or PCI Express device.
3158 */
3159 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
3160 /*
3161		 * Found a PCI Express capabilities register; this
3162 * must be a PCI Express device.
3163 */
3164 sc->bge_flags |= BGE_FLAG_PCIE;
3165 sc->bge_expcap = reg;
3166 /* Extract supported maximum payload size. */
3167 sc->bge_mps = pci_read_config(dev, sc->bge_expcap +
3168 PCIER_DEVICE_CAP, 2);
3169 sc->bge_mps = 128 << (sc->bge_mps & PCIEM_CAP_MAX_PAYLOAD);
3170 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3171 sc->bge_asicrev == BGE_ASICREV_BCM5720)
3172 sc->bge_expmrq = 2048;
3173 else
3174 sc->bge_expmrq = 4096;
3175 pci_set_max_read_req(dev, sc->bge_expmrq);
3169 } else {
3170 /*
3171 * Check if the device is in PCI-X Mode.
3172 * (This bit is not valid on PCI Express controllers.)
3173 */
3174 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
3175 sc->bge_pcixcap = reg;
3176 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3177 BGE_PCISTATE_PCI_BUSMODE) == 0)
3178 sc->bge_flags |= BGE_FLAG_PCIX;
3179 }
3180
3181 /*
3182 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3183 * not actually a MAC controller bug but an issue with the embedded
3184 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3185 */
3186 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3187 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3188 /*
3189 * Some PCI-X bridges are known to trigger write reordering to
3190	 * the mailbox registers. The typical symptom is watchdog timeouts
3191 * caused by out-of-order TX completions. Enable workaround for
3192 * PCI-X devices that live behind these bridges.
3193 * Note, PCI-X controllers can run in PCI mode so we can't use
3194 * BGE_FLAG_PCIX flag to detect PCI-X controllers.
3195 */
3196 if (sc->bge_pcixcap != 0 && bge_mbox_reorder(sc) != 0)
3197 sc->bge_flags |= BGE_FLAG_MBOX_REORDER;
3198 /*
3199 * Allocate the interrupt, using MSI if possible. These devices
3200 * support 8 MSI messages, but only the first one is used in
3201 * normal operation.
3202 */
3203 rid = 0;
3204 if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
3205 sc->bge_msicap = reg;
3206 if (bge_can_use_msi(sc)) {
3207 msicount = pci_msi_count(dev);
3208 if (msicount > 1)
3209 msicount = 1;
3210 } else
3211 msicount = 0;
3212 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
3213 rid = 1;
3214 sc->bge_flags |= BGE_FLAG_MSI;
3215 }
3216 }
3217
3218 /*
3219	 * All controllers except the BCM5700 support tagged status, but
3220	 * we use tagged status only in the MSI case on BCM5717. Otherwise
3221	 * MSI on BCM5717 does not work.
3222 */
3223#ifndef DEVICE_POLLING
3224 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3225 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3226#endif
3227
3228 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3229 RF_SHAREABLE | RF_ACTIVE);
3230
3231 if (sc->bge_irq == NULL) {
3232 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3233 error = ENXIO;
3234 goto fail;
3235 }
3236
3237 bge_devinfo(sc);
3238
3239 /* Try to reset the chip. */
3240 if (bge_reset(sc)) {
3241 device_printf(sc->bge_dev, "chip reset failed\n");
3242 error = ENXIO;
3243 goto fail;
3244 }
3245
3246 sc->bge_asf_mode = 0;
3247 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3248 BGE_SRAM_DATA_SIG_MAGIC)) {
3249 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG)
3250 & BGE_HWCFG_ASF) {
3251 sc->bge_asf_mode |= ASF_ENABLE;
3252 sc->bge_asf_mode |= ASF_STACKUP;
3253 if (BGE_IS_575X_PLUS(sc))
3254 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3255 }
3256 }
3257
3258 /* Try to reset the chip again the nice way. */
3259 bge_stop_fw(sc);
3260 bge_sig_pre_reset(sc, BGE_RESET_STOP);
3261 if (bge_reset(sc)) {
3262 device_printf(sc->bge_dev, "chip reset failed\n");
3263 error = ENXIO;
3264 goto fail;
3265 }
3266
3267 bge_sig_legacy(sc, BGE_RESET_STOP);
3268 bge_sig_post_reset(sc, BGE_RESET_STOP);
3269
3270 if (bge_chipinit(sc)) {
3271 device_printf(sc->bge_dev, "chip initialization failed\n");
3272 error = ENXIO;
3273 goto fail;
3274 }
3275
3276 error = bge_get_eaddr(sc, eaddr);
3277 if (error) {
3278 device_printf(sc->bge_dev,
3279 "failed to read station address\n");
3280 error = ENXIO;
3281 goto fail;
3282 }
3283
3284 /* 5705 limits RX return ring to 512 entries. */
3285 if (BGE_IS_5717_PLUS(sc))
3286 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3287 else if (BGE_IS_5705_PLUS(sc))
3288 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3289 else
3290 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3291
3292 if (bge_dma_alloc(sc)) {
3293 device_printf(sc->bge_dev,
3294 "failed to allocate DMA resources\n");
3295 error = ENXIO;
3296 goto fail;
3297 }
3298
3299 /* Set default tuneable values. */
3300 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3301 sc->bge_rx_coal_ticks = 150;
3302 sc->bge_tx_coal_ticks = 150;
3303 sc->bge_rx_max_coal_bds = 10;
3304 sc->bge_tx_max_coal_bds = 10;
3305
3306 /* Initialize checksum features to use. */
3307 sc->bge_csum_features = BGE_CSUM_FEATURES;
3308 if (sc->bge_forced_udpcsum != 0)
3309 sc->bge_csum_features |= CSUM_UDP;
3310
3311 /* Set up ifnet structure */
3312 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3313 if (ifp == NULL) {
3314 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3315 error = ENXIO;
3316 goto fail;
3317 }
3318 ifp->if_softc = sc;
3319 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3320 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3321 ifp->if_ioctl = bge_ioctl;
3322 ifp->if_start = bge_start;
3323 ifp->if_init = bge_init;
3324 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3325 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3326 IFQ_SET_READY(&ifp->if_snd);
3327 ifp->if_hwassist = sc->bge_csum_features;
3328 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3329 IFCAP_VLAN_MTU;
3330 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3331 ifp->if_hwassist |= CSUM_TSO;
3332 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3333 }
3334#ifdef IFCAP_VLAN_HWCSUM
3335 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3336#endif
3337 ifp->if_capenable = ifp->if_capabilities;
3338#ifdef DEVICE_POLLING
3339 ifp->if_capabilities |= IFCAP_POLLING;
3340#endif
3341
3342 /*
3343 * 5700 B0 chips do not support checksumming correctly due
3344 * to hardware bugs.
3345 */
3346 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3347 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3348 ifp->if_capenable &= ~IFCAP_HWCSUM;
3349 ifp->if_hwassist = 0;
3350 }
3351
3352 /*
3353 * Figure out what sort of media we have by checking the
3354 * hardware config word in the first 32k of NIC internal memory,
3355 * or fall back to examining the EEPROM if necessary.
3356 * Note: on some BCM5700 cards, this value appears to be unset.
3357 * If that's the case, we have to rely on identifying the NIC
3358 * by its PCI subsystem ID, as we do below for the SysKonnect
3359 * SK-9D41.
3360 */
3361 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3362 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3363 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3364 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3365 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3366 sizeof(hwcfg))) {
3367 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3368 error = ENXIO;
3369 goto fail;
3370 }
3371 hwcfg = ntohl(hwcfg);
3372 }
3373
3374 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3375 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3376 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3377 if (BGE_IS_5714_FAMILY(sc))
3378 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3379 else
3380 sc->bge_flags |= BGE_FLAG_TBI;
3381 }
3382
3383 if (sc->bge_flags & BGE_FLAG_TBI) {
3384 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3385 bge_ifmedia_sts);
3386 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3387 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3388 0, NULL);
3389 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3390 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3391 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3392 } else {
3393 /*
3394 * Do transceiver setup and tell the firmware the
3395		 * driver is down so we can try to get access to
3396		 * probe the PHY if ASF is running. Retry a couple of times
3397 * if we get a conflict with the ASF firmware accessing
3398 * the PHY.
3399 */
3400 trys = 0;
3401 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3402again:
3403 bge_asf_driver_up(sc);
3404
3405 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3406 bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3407 MIIF_DOPAUSE);
3408 if (error != 0) {
3409 if (trys++ < 4) {
3410 device_printf(sc->bge_dev, "Try again\n");
3411 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3412 BMCR_RESET);
3413 goto again;
3414 }
3415 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3416 goto fail;
3417 }
3418
3419 /*
3420 * Now tell the firmware we are going up after probing the PHY
3421 */
3422 if (sc->bge_asf_mode & ASF_STACKUP)
3423 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3424 }
3425
3426 /*
3427 * When using the BCM5701 in PCI-X mode, data corruption has
3428 * been observed in the first few bytes of some received packets.
3429 * Aligning the packet buffer in memory eliminates the corruption.
3430 * Unfortunately, this misaligns the packet payloads. On platforms
3431 * which do not support unaligned accesses, we will realign the
3432 * payloads by copying the received packets.
3433 */
3434 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3435 sc->bge_flags & BGE_FLAG_PCIX)
3436 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3437
3438 /*
3439 * Call MI attach routine.
3440 */
3441 ether_ifattach(ifp, eaddr);
3442
3443 /* Tell upper layer we support long frames. */
3444 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3445
3446 /*
3447 * Hookup IRQ last.
3448 */
3449 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3450 /* Take advantage of single-shot MSI. */
3451 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3452 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3453 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3454 taskqueue_thread_enqueue, &sc->bge_tq);
3455 if (sc->bge_tq == NULL) {
3456 device_printf(dev, "could not create taskqueue.\n");
3457 ether_ifdetach(ifp);
3458 error = ENOMEM;
3459 goto fail;
3460 }
3461 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3462 device_get_nameunit(sc->bge_dev));
3463 error = bus_setup_intr(dev, sc->bge_irq,
3464 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3465 &sc->bge_intrhand);
3466 } else
3467 error = bus_setup_intr(dev, sc->bge_irq,
3468 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3469 &sc->bge_intrhand);
3470
3471 if (error) {
3472 ether_ifdetach(ifp);
3473 device_printf(sc->bge_dev, "couldn't set up irq\n");
3474 }
3475
3476fail:
3477 if (error)
3478 bge_detach(dev);
3479 return (error);
3480}
3481
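/*
 * Detach the device: stop the controller, detach from the network
 * stack and release interrupt, bus and DMA resources.
 */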
3482static int
3483bge_detach(device_t dev)
3484{
3485 struct bge_softc *sc;
3486 struct ifnet *ifp;
3487
3488 sc = device_get_softc(dev);
3489 ifp = sc->bge_ifp;
3490
3491#ifdef DEVICE_POLLING
3492 if (ifp->if_capenable & IFCAP_POLLING)
3493 ether_poll_deregister(ifp);
3494#endif
3495
3496 if (device_is_attached(dev)) {
3497 ether_ifdetach(ifp);
3498 BGE_LOCK(sc);
3499 bge_stop(sc);
3500 BGE_UNLOCK(sc);
3501 callout_drain(&sc->bge_stat_ch);
3502 }
3503
3504 if (sc->bge_tq)
3505 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3506
3507 if (sc->bge_flags & BGE_FLAG_TBI) {
3508 ifmedia_removeall(&sc->bge_ifmedia);
3509 } else {
3510 bus_generic_detach(dev);
3511 device_delete_child(dev, sc->bge_miibus);
3512 }
3513
3514 bge_release_resources(sc);
3515
3516 return (0);
3517}
3518
3519static void
3520bge_release_resources(struct bge_softc *sc)
3521{
3522 device_t dev;
3523
3524 dev = sc->bge_dev;
3525
3526 if (sc->bge_tq != NULL)
3527 taskqueue_free(sc->bge_tq);
3528
3529 if (sc->bge_intrhand != NULL)
3530 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3531
3532 if (sc->bge_irq != NULL)
3533 bus_release_resource(dev, SYS_RES_IRQ,
3534 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3535
3536 if (sc->bge_flags & BGE_FLAG_MSI)
3537 pci_release_msi(dev);
3538
3539 if (sc->bge_res != NULL)
3540 bus_release_resource(dev, SYS_RES_MEMORY,
3541 PCIR_BAR(0), sc->bge_res);
3542
3543 if (sc->bge_ifp != NULL)
3544 if_free(sc->bge_ifp);
3545
3546 bge_dma_free(sc);
3547
3548 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3549 BGE_LOCK_DESTROY(sc);
3550}
3551
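/*
 * Issue a global reset of the controller. Important PCI configuration
 * registers are saved first, and the BGE_SRAM_FW_MB_MAGIC handshake
 * value is written to SRAM so the firmware can signal when it has
 * finished reinitializing.
 */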
3552static int
3553bge_reset(struct bge_softc *sc)
3554{
3555 device_t dev;
3556 uint32_t cachesize, command, pcistate, reset, val;
3557 void (*write_op)(struct bge_softc *, int, int);
3558 uint16_t devctl;
3559 int i;
3560
3561 dev = sc->bge_dev;
3562
3563 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3564 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3565 if (sc->bge_flags & BGE_FLAG_PCIE)
3566 write_op = bge_writemem_direct;
3567 else
3568 write_op = bge_writemem_ind;
3569 } else
3570 write_op = bge_writereg_ind;
3571
3572 /* Save some important PCI state. */
3573 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3574 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3575 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3576
3577 pci_write_config(dev, BGE_PCI_MISC_CTL,
3578 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3579 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3580
3581 /* Disable fastboot on controllers that support it. */
3582 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3583 BGE_IS_5755_PLUS(sc)) {
3584 if (bootverbose)
3585 device_printf(dev, "Disabling fastboot\n");
3586 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3587 }
3588
3589 /*
3590 * Write the magic number to SRAM at offset 0xB50.
3591 * When firmware finishes its initialization it will
3592 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
3593 */
3594 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
3595
3596 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3597
3598 /* XXX: Broadcom Linux driver. */
3599 if (sc->bge_flags & BGE_FLAG_PCIE) {
3600 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3601 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3602 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3603 /* Prevent PCIE link training during global reset */
3604 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3605 reset |= 1 << 29;
3606 }
3607 }
3608
3609 /*
3610 * Set GPHY Power Down Override to leave GPHY
3611 * powered up in D0 uninitialized.
3612 */
3613 if (BGE_IS_5705_PLUS(sc) &&
3614 (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
3615 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3616
3617 /* Issue global reset */
3618 write_op(sc, BGE_MISC_CFG, reset);
3619
3620 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3621 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3622 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3623 val | BGE_VCPU_STATUS_DRV_RESET);
3624 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3625 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3626 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3627 }
3628
3629 DELAY(1000);
3630
3631 /* XXX: Broadcom Linux driver. */
3632 if (sc->bge_flags & BGE_FLAG_PCIE) {
3633 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3634 DELAY(500000); /* wait for link training to complete */
3635 val = pci_read_config(dev, 0xC4, 4);
3636 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3637 }
3638 devctl = pci_read_config(dev,
3639 sc->bge_expcap + PCIER_DEVICE_CTL, 2);
3640 /* Clear enable no snoop and disable relaxed ordering. */
3641 devctl &= ~(PCIEM_CTL_RELAXED_ORD_ENABLE |
3642 PCIEM_CTL_NOSNOOP_ENABLE);
3643 pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL,
3644 devctl, 2);
3176 } else {
3177 /*
3178 * Check if the device is in PCI-X Mode.
3179 * (This bit is not valid on PCI Express controllers.)
3180 */
3181 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
3182 sc->bge_pcixcap = reg;
3183 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3184 BGE_PCISTATE_PCI_BUSMODE) == 0)
3185 sc->bge_flags |= BGE_FLAG_PCIX;
3186 }
3187
3188 /*
3189 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3190 * not actually a MAC controller bug but an issue with the embedded
3191	 * PCIe to PCI-X bridge in the device. Use the 40bit DMA workaround.
3192 */
3193 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3194 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3195 /*
3196 * Some PCI-X bridges are known to trigger write reordering to
3197	 * the mailbox registers. The typical symptom is watchdog timeouts
3198	 * caused by out-of-order TX completions. Enable the workaround for
3199	 * PCI-X devices that live behind these bridges.
3200	 * Note that PCI-X capable controllers can also run in plain PCI mode,
3201	 * so we can't use the BGE_FLAG_PCIX flag to detect them.
3202 */
3203 if (sc->bge_pcixcap != 0 && bge_mbox_reorder(sc) != 0)
3204 sc->bge_flags |= BGE_FLAG_MBOX_REORDER;
3205 /*
3206 * Allocate the interrupt, using MSI if possible. These devices
3207 * support 8 MSI messages, but only the first one is used in
3208 * normal operation.
3209 */
3210 rid = 0;
3211 if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
3212 sc->bge_msicap = reg;
3213 if (bge_can_use_msi(sc)) {
3214 msicount = pci_msi_count(dev);
3215 if (msicount > 1)
3216 msicount = 1;
3217 } else
3218 msicount = 0;
3219 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
3220 rid = 1;
3221 sc->bge_flags |= BGE_FLAG_MSI;
3222 }
3223 }
3224
3225 /*
3226	 * All controllers except the BCM5700 support tagged status, but
3227	 * we use tagged status only in the MSI case on BCM5717. Otherwise
3228	 * MSI on BCM5717 does not work.
3229 */
3230#ifndef DEVICE_POLLING
3231 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3232 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3233#endif
3234
3235 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3236 RF_SHAREABLE | RF_ACTIVE);
3237
3238 if (sc->bge_irq == NULL) {
3239 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3240 error = ENXIO;
3241 goto fail;
3242 }
3243
3244 bge_devinfo(sc);
3245
3246 /* Try to reset the chip. */
3247 if (bge_reset(sc)) {
3248 device_printf(sc->bge_dev, "chip reset failed\n");
3249 error = ENXIO;
3250 goto fail;
3251 }
3252
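	/*
	 * Check the SRAM data signature to find out whether ASF management
	 * firmware is present (and allowed by bge_allow_asf); the mode
	 * recorded here selects the firmware handshake used around later
	 * resets.
	 */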
3253 sc->bge_asf_mode = 0;
3254 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3255 BGE_SRAM_DATA_SIG_MAGIC)) {
3256 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG)
3257 & BGE_HWCFG_ASF) {
3258 sc->bge_asf_mode |= ASF_ENABLE;
3259 sc->bge_asf_mode |= ASF_STACKUP;
3260 if (BGE_IS_575X_PLUS(sc))
3261 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3262 }
3263 }
3264
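	/*
	 * "The nice way" below means the reset is bracketed by the ASF/IPMI
	 * firmware handshake calls (bge_stop_fw(), bge_sig_pre_reset(),
	 * bge_sig_legacy() and bge_sig_post_reset()) so any management
	 * firmware detected above is told about the reset.
	 */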
3265 /* Try to reset the chip again the nice way. */
3266 bge_stop_fw(sc);
3267 bge_sig_pre_reset(sc, BGE_RESET_STOP);
3268 if (bge_reset(sc)) {
3269 device_printf(sc->bge_dev, "chip reset failed\n");
3270 error = ENXIO;
3271 goto fail;
3272 }
3273
3274 bge_sig_legacy(sc, BGE_RESET_STOP);
3275 bge_sig_post_reset(sc, BGE_RESET_STOP);
3276
3277 if (bge_chipinit(sc)) {
3278 device_printf(sc->bge_dev, "chip initialization failed\n");
3279 error = ENXIO;
3280 goto fail;
3281 }
3282
3283 error = bge_get_eaddr(sc, eaddr);
3284 if (error) {
3285 device_printf(sc->bge_dev,
3286 "failed to read station address\n");
3287 error = ENXIO;
3288 goto fail;
3289 }
3290
3291 /* 5705 limits RX return ring to 512 entries. */
3292 if (BGE_IS_5717_PLUS(sc))
3293 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3294 else if (BGE_IS_5705_PLUS(sc))
3295 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3296 else
3297 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3298
3299 if (bge_dma_alloc(sc)) {
3300 device_printf(sc->bge_dev,
3301 "failed to allocate DMA resources\n");
3302 error = ENXIO;
3303 goto fail;
3304 }
3305
3306 /* Set default tuneable values. */
3307 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3308 sc->bge_rx_coal_ticks = 150;
3309 sc->bge_tx_coal_ticks = 150;
3310 sc->bge_rx_max_coal_bds = 10;
3311 sc->bge_tx_max_coal_bds = 10;
3312
3313 /* Initialize checksum features to use. */
3314 sc->bge_csum_features = BGE_CSUM_FEATURES;
3315 if (sc->bge_forced_udpcsum != 0)
3316 sc->bge_csum_features |= CSUM_UDP;
3317
3318 /* Set up ifnet structure */
3319 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3320 if (ifp == NULL) {
3321 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3322 error = ENXIO;
3323 goto fail;
3324 }
3325 ifp->if_softc = sc;
3326 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3327 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3328 ifp->if_ioctl = bge_ioctl;
3329 ifp->if_start = bge_start;
3330 ifp->if_init = bge_init;
3331 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3332 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3333 IFQ_SET_READY(&ifp->if_snd);
3334 ifp->if_hwassist = sc->bge_csum_features;
3335 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3336 IFCAP_VLAN_MTU;
3337 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3338 ifp->if_hwassist |= CSUM_TSO;
3339 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3340 }
3341#ifdef IFCAP_VLAN_HWCSUM
3342 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3343#endif
3344 ifp->if_capenable = ifp->if_capabilities;
3345#ifdef DEVICE_POLLING
3346 ifp->if_capabilities |= IFCAP_POLLING;
3347#endif
3348
3349 /*
3350 * 5700 B0 chips do not support checksumming correctly due
3351 * to hardware bugs.
3352 */
3353 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3354 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3355 ifp->if_capenable &= ~IFCAP_HWCSUM;
3356 ifp->if_hwassist = 0;
3357 }
3358
3359 /*
3360 * Figure out what sort of media we have by checking the
3361 * hardware config word in the first 32k of NIC internal memory,
3362 * or fall back to examining the EEPROM if necessary.
3363 * Note: on some BCM5700 cards, this value appears to be unset.
3364 * If that's the case, we have to rely on identifying the NIC
3365 * by its PCI subsystem ID, as we do below for the SysKonnect
3366 * SK-9D41.
3367 */
3368 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3369 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3370 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3371 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3372 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3373 sizeof(hwcfg))) {
3374 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3375 error = ENXIO;
3376 goto fail;
3377 }
3378 hwcfg = ntohl(hwcfg);
3379 }
3380
3381 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3382 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3383 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3384 if (BGE_IS_5714_FAMILY(sc))
3385 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3386 else
3387 sc->bge_flags |= BGE_FLAG_TBI;
3388 }
3389
3390 if (sc->bge_flags & BGE_FLAG_TBI) {
3391 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3392 bge_ifmedia_sts);
3393 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3394 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3395 0, NULL);
3396 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3397 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3398 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3399 } else {
3400 /*
3401	 * Do transceiver setup and tell the firmware the
3402	 * driver is down so we can try to get access to probe
3403	 * the PHY if ASF is running. Retry a couple of times
3404 * if we get a conflict with the ASF firmware accessing
3405 * the PHY.
3406 */
3407 trys = 0;
3408 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3409again:
3410 bge_asf_driver_up(sc);
3411
3412 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3413 bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3414 MIIF_DOPAUSE);
3415 if (error != 0) {
3416 if (trys++ < 4) {
3417 device_printf(sc->bge_dev, "Try again\n");
3418 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3419 BMCR_RESET);
3420 goto again;
3421 }
3422 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3423 goto fail;
3424 }
3425
3426 /*
3427 * Now tell the firmware we are going up after probing the PHY
3428 */
3429 if (sc->bge_asf_mode & ASF_STACKUP)
3430 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3431 }
3432
3433 /*
3434 * When using the BCM5701 in PCI-X mode, data corruption has
3435 * been observed in the first few bytes of some received packets.
3436 * Aligning the packet buffer in memory eliminates the corruption.
3437 * Unfortunately, this misaligns the packet payloads. On platforms
3438 * which do not support unaligned accesses, we will realign the
3439 * payloads by copying the received packets.
3440 */
3441 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3442 sc->bge_flags & BGE_FLAG_PCIX)
3443 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3444
3445 /*
3446 * Call MI attach routine.
3447 */
3448 ether_ifattach(ifp, eaddr);
3449
3450 /* Tell upper layer we support long frames. */
3451 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3452
3453 /*
3454 * Hookup IRQ last.
3455 */
3456 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3457 /* Take advantage of single-shot MSI. */
3458 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3459 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3460 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3461 taskqueue_thread_enqueue, &sc->bge_tq);
3462 if (sc->bge_tq == NULL) {
3463 device_printf(dev, "could not create taskqueue.\n");
3464 ether_ifdetach(ifp);
3465 error = ENOMEM;
3466 goto fail;
3467 }
3468 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3469 device_get_nameunit(sc->bge_dev));
3470 error = bus_setup_intr(dev, sc->bge_irq,
3471 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3472 &sc->bge_intrhand);
3473 } else
3474 error = bus_setup_intr(dev, sc->bge_irq,
3475 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3476 &sc->bge_intrhand);
3477
3478 if (error) {
3479 ether_ifdetach(ifp);
3480 device_printf(sc->bge_dev, "couldn't set up irq\n");
3481 }
3482
3483fail:
3484 if (error)
3485 bge_detach(dev);
3486 return (error);
3487}
3488
3489static int
3490bge_detach(device_t dev)
3491{
3492 struct bge_softc *sc;
3493 struct ifnet *ifp;
3494
3495 sc = device_get_softc(dev);
3496 ifp = sc->bge_ifp;
3497
3498#ifdef DEVICE_POLLING
3499 if (ifp->if_capenable & IFCAP_POLLING)
3500 ether_poll_deregister(ifp);
3501#endif
3502
3503 if (device_is_attached(dev)) {
3504 ether_ifdetach(ifp);
3505 BGE_LOCK(sc);
3506 bge_stop(sc);
3507 BGE_UNLOCK(sc);
3508 callout_drain(&sc->bge_stat_ch);
3509 }
3510
3511 if (sc->bge_tq)
3512 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3513
3514 if (sc->bge_flags & BGE_FLAG_TBI) {
3515 ifmedia_removeall(&sc->bge_ifmedia);
3516 } else {
3517 bus_generic_detach(dev);
3518 device_delete_child(dev, sc->bge_miibus);
3519 }
3520
3521 bge_release_resources(sc);
3522
3523 return (0);
3524}
3525
3526static void
3527bge_release_resources(struct bge_softc *sc)
3528{
3529 device_t dev;
3530
3531 dev = sc->bge_dev;
3532
3533 if (sc->bge_tq != NULL)
3534 taskqueue_free(sc->bge_tq);
3535
3536 if (sc->bge_intrhand != NULL)
3537 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3538
3539 if (sc->bge_irq != NULL)
3540 bus_release_resource(dev, SYS_RES_IRQ,
3541 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3542
3543 if (sc->bge_flags & BGE_FLAG_MSI)
3544 pci_release_msi(dev);
3545
3546 if (sc->bge_res != NULL)
3547 bus_release_resource(dev, SYS_RES_MEMORY,
3548 PCIR_BAR(0), sc->bge_res);
3549
3550 if (sc->bge_ifp != NULL)
3551 if_free(sc->bge_ifp);
3552
3553 bge_dma_free(sc);
3554
3555 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3556 BGE_LOCK_DESTROY(sc);
3557}
3558
3559static int
3560bge_reset(struct bge_softc *sc)
3561{
3562 device_t dev;
3563 uint32_t cachesize, command, pcistate, reset, val;
3564 void (*write_op)(struct bge_softc *, int, int);
3565 uint16_t devctl;
3566 int i;
3567
3568 dev = sc->bge_dev;
3569
3570 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3571 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3572 if (sc->bge_flags & BGE_FLAG_PCIE)
3573 write_op = bge_writemem_direct;
3574 else
3575 write_op = bge_writemem_ind;
3576 } else
3577 write_op = bge_writereg_ind;
3578
3579 /* Save some important PCI state. */
3580 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3581 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3582 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3583
3584 pci_write_config(dev, BGE_PCI_MISC_CTL,
3585 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3586 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3587
3588 /* Disable fastboot on controllers that support it. */
3589 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3590 BGE_IS_5755_PLUS(sc)) {
3591 if (bootverbose)
3592 device_printf(dev, "Disabling fastboot\n");
3593 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3594 }
3595
3596 /*
3597 * Write the magic number to SRAM at offset 0xB50.
3598 * When firmware finishes its initialization it will
3599 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
3600 */
3601 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
3602
3603 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3604
3605 /* XXX: Broadcom Linux driver. */
3606 if (sc->bge_flags & BGE_FLAG_PCIE) {
3607 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3608 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3609 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3610 /* Prevent PCIE link training during global reset */
3611 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3612 reset |= 1 << 29;
3613 }
3614 }
3615
3616 /*
3617 * Set GPHY Power Down Override to leave GPHY
3618 * powered up in D0 uninitialized.
3619 */
3620 if (BGE_IS_5705_PLUS(sc) &&
3621 (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
3622 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3623
3624 /* Issue global reset */
3625 write_op(sc, BGE_MISC_CFG, reset);
3626
3627 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3628 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3629 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3630 val | BGE_VCPU_STATUS_DRV_RESET);
3631 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3632 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3633 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3634 }
3635
3636 DELAY(1000);
3637
3638 /* XXX: Broadcom Linux driver. */
3639 if (sc->bge_flags & BGE_FLAG_PCIE) {
3640 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3641 DELAY(500000); /* wait for link training to complete */
3642 val = pci_read_config(dev, 0xC4, 4);
3643 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3644 }
3645 devctl = pci_read_config(dev,
3646 sc->bge_expcap + PCIER_DEVICE_CTL, 2);
3647 /* Clear enable no snoop and disable relaxed ordering. */
3648 devctl &= ~(PCIEM_CTL_RELAXED_ORD_ENABLE |
3649 PCIEM_CTL_NOSNOOP_ENABLE);
3650 pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL,
3651 devctl, 2);
3652 pci_set_max_read_req(dev, sc->bge_expmrq);
3645 /* Clear error status. */
3646 pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_STA,
3647 PCIEM_STA_CORRECTABLE_ERROR |
3648 PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
3649 PCIEM_STA_UNSUPPORTED_REQ, 2);
3650 }
3651
3652 /* Reset some of the PCI state that got zapped by reset. */
3653 pci_write_config(dev, BGE_PCI_MISC_CTL,
3654 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3655 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3656 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3657 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3658 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3659 /*
3660	 * Disable PCI-X relaxed ordering so that the status block update
3661	 * arrives before the packet buffer DMA. Otherwise the driver may
3662	 * read a stale status block.
3663 */
3664 if (sc->bge_flags & BGE_FLAG_PCIX) {
3665 devctl = pci_read_config(dev,
3666 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3667 devctl &= ~PCIXM_COMMAND_ERO;
3668 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3669 devctl &= ~PCIXM_COMMAND_MAX_READ;
3670 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3671 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3672 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3673 PCIXM_COMMAND_MAX_READ);
3674 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3675 }
3676 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3677 devctl, 2);
3678 }
3679 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3680 if (BGE_IS_5714_FAMILY(sc)) {
3681 /* This chip disables MSI on reset. */
3682 if (sc->bge_flags & BGE_FLAG_MSI) {
3683 val = pci_read_config(dev,
3684 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3685 pci_write_config(dev,
3686 sc->bge_msicap + PCIR_MSI_CTRL,
3687 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3688 val = CSR_READ_4(sc, BGE_MSI_MODE);
3689 CSR_WRITE_4(sc, BGE_MSI_MODE,
3690 val | BGE_MSIMODE_ENABLE);
3691 }
3692 val = CSR_READ_4(sc, BGE_MARB_MODE);
3693 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3694 } else
3695 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3696
3697 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3698 for (i = 0; i < BGE_TIMEOUT; i++) {
3699 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3700 if (val & BGE_VCPU_STATUS_INIT_DONE)
3701 break;
3702 DELAY(100);
3703 }
3704 if (i == BGE_TIMEOUT) {
3705 device_printf(dev, "reset timed out\n");
3706 return (1);
3707 }
3708 } else {
3709 /*
3710 * Poll until we see the 1's complement of the magic number.
3711 * This indicates that the firmware initialization is complete.
3712 * We expect this to fail if no chip containing the Ethernet
3713 * address is fitted though.
3714 */
3715 for (i = 0; i < BGE_TIMEOUT; i++) {
3716 DELAY(10);
3717 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
3718 if (val == ~BGE_SRAM_FW_MB_MAGIC)
3719 break;
3720 }
3721
3722 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3723 device_printf(dev,
3724 "firmware handshake timed out, found 0x%08x\n",
3725 val);
3726 /* BCM57765 A0 needs additional time before accessing. */
3727 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3728 DELAY(10 * 1000); /* XXX */
3729 }
3730
3731 /*
3732 * XXX Wait for the value of the PCISTATE register to
3733 * return to its original pre-reset state. This is a
3734 * fairly good indicator of reset completion. If we don't
3735 * wait for the reset to fully complete, trying to read
3736 * from the device's non-PCI registers may yield garbage
3737 * results.
3738 */
3739 for (i = 0; i < BGE_TIMEOUT; i++) {
3740 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3741 break;
3742 DELAY(10);
3743 }
3744
3745 /* Fix up byte swapping. */
3746 CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
3747
3748 /* Tell the ASF firmware we are up */
3749 if (sc->bge_asf_mode & ASF_STACKUP)
3750 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3751
3752 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3753 DELAY(40);
3754
3755 /*
3756 * The 5704 in TBI mode apparently needs some special
3757	 * adjustment to ensure the SERDES drive level is set
3758 * to 1.2V.
3759 */
3760 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3761 sc->bge_flags & BGE_FLAG_TBI) {
3762 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3763 val = (val & ~0xFFF) | 0x880;
3764 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3765 }
3766
3767 /* XXX: Broadcom Linux driver. */
3768 if (sc->bge_flags & BGE_FLAG_PCIE &&
3769 !BGE_IS_5717_PLUS(sc) &&
3770 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3771 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3772 /* Enable Data FIFO protection. */
3773 val = CSR_READ_4(sc, 0x7C00);
3774 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3775 }
3776 DELAY(10000);
3777
3778 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
3779 BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
3780 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
3781
3782 return (0);
3783}
3784
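/*
 * Requeue the existing receive buffer in the standard ring: restore the
 * saved segment length and buffer index so the chip can reuse the same
 * mbuf after an RX error or an mbuf allocation failure.
 */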
3785static __inline void
3786bge_rxreuse_std(struct bge_softc *sc, int i)
3787{
3788 struct bge_rx_bd *r;
3789
3790 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3791 r->bge_flags = BGE_RXBDFLAG_END;
3792 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3793 r->bge_idx = i;
3794 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3795}
3796
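/*
 * Same as bge_rxreuse_std(), but for the four-segment descriptors of the
 * jumbo receive ring.
 */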
3797static __inline void
3798bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3799{
3800 struct bge_extrx_bd *r;
3801
3802 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3803 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3804 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3805 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3806 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3807 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3808 r->bge_idx = i;
3809 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3810}
3811
3812/*
3813 * Frame reception handling. This is called if there's a frame
3814 * on the receive return list.
3815 *
3816 * Note: we have to be able to handle two possibilities here:
3817 * 1) the frame is from the jumbo receive ring
3818 * 2) the frame is from the standard receive ring
3819 */
3820
3821static int
3822bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3823{
3824 struct ifnet *ifp;
3825 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3826 uint16_t rx_cons;
3827
3828 rx_cons = sc->bge_rx_saved_considx;
3829
3830 /* Nothing to do. */
3831 if (rx_cons == rx_prod)
3832 return (rx_npkts);
3833
3834 ifp = sc->bge_ifp;
3835
3836 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3837 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3838 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3839 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3840 if (BGE_IS_JUMBO_CAPABLE(sc) &&
3841 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3842 (MCLBYTES - ETHER_ALIGN))
3843 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3844 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3845
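	/*
	 * Walk the RX return ring from our saved consumer index up to the
	 * producer index reported in the status block, replenishing the
	 * standard or jumbo ring as frames are pulled off.
	 */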
3846 while (rx_cons != rx_prod) {
3847 struct bge_rx_bd *cur_rx;
3848 uint32_t rxidx;
3849 struct mbuf *m = NULL;
3850 uint16_t vlan_tag = 0;
3851 int have_tag = 0;
3852
3853#ifdef DEVICE_POLLING
3854 if (ifp->if_capenable & IFCAP_POLLING) {
3855 if (sc->rxcycles <= 0)
3856 break;
3857 sc->rxcycles--;
3858 }
3859#endif
3860
3861 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3862
3863 rxidx = cur_rx->bge_idx;
3864 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3865
3866 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3867 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3868 have_tag = 1;
3869 vlan_tag = cur_rx->bge_vlan_tag;
3870 }
3871
3872 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3873 jumbocnt++;
3874 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3875 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3876 bge_rxreuse_jumbo(sc, rxidx);
3877 continue;
3878 }
3879 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3880 bge_rxreuse_jumbo(sc, rxidx);
3881 ifp->if_iqdrops++;
3882 continue;
3883 }
3884 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3885 } else {
3886 stdcnt++;
3887 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3888 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3889 bge_rxreuse_std(sc, rxidx);
3890 continue;
3891 }
3892 if (bge_newbuf_std(sc, rxidx) != 0) {
3893 bge_rxreuse_std(sc, rxidx);
3894 ifp->if_iqdrops++;
3895 continue;
3896 }
3897 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3898 }
3899
3900 ifp->if_ipackets++;
3901#ifndef __NO_STRICT_ALIGNMENT
3902 /*
3903 * For architectures with strict alignment we must make sure
3904 * the payload is aligned.
3905 */
3906 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3907 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3908 cur_rx->bge_len);
3909 m->m_data += ETHER_ALIGN;
3910 }
3911#endif
3912 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3913 m->m_pkthdr.rcvif = ifp;
3914
3915 if (ifp->if_capenable & IFCAP_RXCSUM)
3916 bge_rxcsum(sc, cur_rx, m);
3917
3918 /*
3919 * If we received a packet with a vlan tag,
3920 * attach that information to the packet.
3921 */
3922 if (have_tag) {
3923 m->m_pkthdr.ether_vtag = vlan_tag;
3924 m->m_flags |= M_VLANTAG;
3925 }
3926
3927 if (holdlck != 0) {
3928 BGE_UNLOCK(sc);
3929 (*ifp->if_input)(ifp, m);
3930 BGE_LOCK(sc);
3931 } else
3932 (*ifp->if_input)(ifp, m);
3933 rx_npkts++;
3934
3935 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3936 return (rx_npkts);
3937 }
3938
3939 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3940 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3941 if (stdcnt > 0)
3942 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3943 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3944
3945 if (jumbocnt > 0)
3946 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3947 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3948
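	/*
	 * Tell the chip how far we got: advance the return ring consumer
	 * index and, if any buffers were replenished, the standard/jumbo
	 * producer indexes as well.
	 */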
3949 sc->bge_rx_saved_considx = rx_cons;
3950 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3951 if (stdcnt)
3952 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3953 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3954 if (jumbocnt)
3955 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3956 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3957#ifdef notyet
3958 /*
3959 * This register wraps very quickly under heavy packet drops.
3960 * If you need correct statistics, you can enable this check.
3961 */
3962 if (BGE_IS_5705_PLUS(sc))
3963 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3964#endif
3965 return (rx_npkts);
3966}
3967
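/*
 * Translate the controller's RX checksum results into mbuf checksum flags.
 * 5717-class devices report IP checksum failures through bge_error_flag,
 * while older devices expose the raw IP header checksum for the driver to
 * verify itself.
 */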
3968static void
3969bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3970{
3971
3972 if (BGE_IS_5717_PLUS(sc)) {
3973 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3974 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3975 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3976 if ((cur_rx->bge_error_flag &
3977 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3978 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3979 }
3980 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3981 m->m_pkthdr.csum_data =
3982 cur_rx->bge_tcp_udp_csum;
3983 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3984 CSUM_PSEUDO_HDR;
3985 }
3986 }
3987 } else {
3988 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3989 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3990 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3991 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3992 }
3993 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3994 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3995 m->m_pkthdr.csum_data =
3996 cur_rx->bge_tcp_udp_csum;
3997 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3998 CSUM_PSEUDO_HDR;
3999 }
4000 }
4001}
4002
4003static void
4004bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
4005{
4006 struct bge_tx_bd *cur_tx;
4007 struct ifnet *ifp;
4008
4009 BGE_LOCK_ASSERT(sc);
4010
4011 /* Nothing to do. */
4012 if (sc->bge_tx_saved_considx == tx_cons)
4013 return;
4014
4015 ifp = sc->bge_ifp;
4016
4017 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4018 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
4019 /*
4020 * Go through our tx ring and free mbufs for those
4021 * frames that have been sent.
4022 */
4023 while (sc->bge_tx_saved_considx != tx_cons) {
4024 uint32_t idx;
4025
4026 idx = sc->bge_tx_saved_considx;
4027 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
4028 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
4029 ifp->if_opackets++;
4030 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
4031 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
4032 sc->bge_cdata.bge_tx_dmamap[idx],
4033 BUS_DMASYNC_POSTWRITE);
4034 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
4035 sc->bge_cdata.bge_tx_dmamap[idx]);
4036 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
4037 sc->bge_cdata.bge_tx_chain[idx] = NULL;
4038 }
4039 sc->bge_txcnt--;
4040 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
4041 }
4042
4043 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4044 if (sc->bge_txcnt == 0)
4045 sc->bge_timer = 0;
4046}
4047
4048#ifdef DEVICE_POLLING
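/*
 * Polling entry point. This mirrors the interrupt handler, but processes
 * at most 'count' RX frames per call and only checks link state on
 * POLL_AND_CHECK_STATUS cycles.
 */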
4049static int
4050bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
4051{
4052 struct bge_softc *sc = ifp->if_softc;
4053 uint16_t rx_prod, tx_cons;
4054 uint32_t statusword;
4055 int rx_npkts = 0;
4056
4057 BGE_LOCK(sc);
4058 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4059 BGE_UNLOCK(sc);
4060 return (rx_npkts);
4061 }
4062
4063 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4064 sc->bge_cdata.bge_status_map,
4065 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4066 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4067 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4068
4069 statusword = sc->bge_ldata.bge_status_block->bge_status;
4070 sc->bge_ldata.bge_status_block->bge_status = 0;
4071
4072 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4073 sc->bge_cdata.bge_status_map,
4074 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4075
4076 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
4077 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
4078 sc->bge_link_evt++;
4079
4080 if (cmd == POLL_AND_CHECK_STATUS)
4081 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4082 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4083 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
4084 bge_link_upd(sc);
4085
4086 sc->rxcycles = count;
4087 rx_npkts = bge_rxeof(sc, rx_prod, 1);
4088 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4089 BGE_UNLOCK(sc);
4090 return (rx_npkts);
4091 }
4092 bge_txeof(sc, tx_cons);
4093 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4094 bge_start_locked(ifp);
4095
4096 BGE_UNLOCK(sc);
4097 return (rx_npkts);
4098}
4099#endif /* DEVICE_POLLING */
4100
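/*
 * MSI interrupt filter. It runs in filter (interrupt) context, so it only
 * hands the real work off to the taskqueue created at attach time.
 */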
4101static int
4102bge_msi_intr(void *arg)
4103{
4104 struct bge_softc *sc;
4105
4106 sc = (struct bge_softc *)arg;
4107 /*
4108	 * This interrupt is not shared, and the controller has already
4109	 * disabled further interrupts.
4110 */
4111 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
4112 return (FILTER_HANDLED);
4113}
4114
4115static void
4116bge_intr_task(void *arg, int pending)
4117{
4118 struct bge_softc *sc;
4119 struct ifnet *ifp;
4120 uint32_t status, status_tag;
4121 uint16_t rx_prod, tx_cons;
4122
4123 sc = (struct bge_softc *)arg;
4124 ifp = sc->bge_ifp;
4125
4126 BGE_LOCK(sc);
4127 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4128 BGE_UNLOCK(sc);
4129 return;
4130 }
4131
4132 /* Get updated status block. */
4133 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4134 sc->bge_cdata.bge_status_map,
4135 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4136
4137	/* Save producer/consumer indexes. */
4138 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4139 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4140 status = sc->bge_ldata.bge_status_block->bge_status;
4141 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
4142 sc->bge_ldata.bge_status_block->bge_status = 0;
4143 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4144 sc->bge_cdata.bge_status_map,
4145 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4146 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
4147 status_tag = 0;
4148
4149 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
4150 bge_link_upd(sc);
4151
4152	/* Let the controller work: write back the status tag to re-arm the interrupt. */
4153 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
4154
4155 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4156 sc->bge_rx_saved_considx != rx_prod) {
4157 /* Check RX return ring producer/consumer. */
4158 BGE_UNLOCK(sc);
4159 bge_rxeof(sc, rx_prod, 0);
4160 BGE_LOCK(sc);
4161 }
4162 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4163 /* Check TX ring producer/consumer. */
4164 bge_txeof(sc, tx_cons);
4165 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4166 bge_start_locked(ifp);
4167 }
4168 BGE_UNLOCK(sc);
4169}
4170
4171static void
4172bge_intr(void *xsc)
4173{
4174 struct bge_softc *sc;
4175 struct ifnet *ifp;
4176 uint32_t statusword;
4177 uint16_t rx_prod, tx_cons;
4178
4179 sc = xsc;
4180
4181 BGE_LOCK(sc);
4182
4183 ifp = sc->bge_ifp;
4184
4185#ifdef DEVICE_POLLING
4186 if (ifp->if_capenable & IFCAP_POLLING) {
4187 BGE_UNLOCK(sc);
4188 return;
4189 }
4190#endif
4191
4192 /*
4193 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
4194 * disable interrupts by writing nonzero like we used to, since with
4195 * our current organization this just gives complications and
4196 * pessimizations for re-enabling interrupts. We used to have races
4197 * instead of the necessary complications. Disabling interrupts
4198 * would just reduce the chance of a status update while we are
4199 * running (by switching to the interrupt-mode coalescence
4200 * parameters), but this chance is already very low so it is more
4201 * efficient to get another interrupt than prevent it.
4202 *
4203 * We do the ack first to ensure another interrupt if there is a
4204 * status update after the ack. We don't check for the status
4205 * changing later because it is more efficient to get another
4206 * interrupt than prevent it, not quite as above (not checking is
4207 * a smaller optimization than not toggling the interrupt enable,
4208	 * since checking doesn't involve PCI accesses and toggling requires
4209 * the status check). So toggling would probably be a pessimization
4210 * even with MSI. It would only be needed for using a task queue.
4211 */
4212 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4213
4214 /*
4215 * Do the mandatory PCI flush as well as get the link status.
4216 */
4217 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4218
4219 /* Make sure the descriptor ring indexes are coherent. */
4220 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4221 sc->bge_cdata.bge_status_map,
4222 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4223 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4224 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4225 sc->bge_ldata.bge_status_block->bge_status = 0;
4226 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4227 sc->bge_cdata.bge_status_map,
4228 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4229
4230 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4231 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4232 statusword || sc->bge_link_evt)
4233 bge_link_upd(sc);
4234
4235 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4236 /* Check RX return ring producer/consumer. */
4237 bge_rxeof(sc, rx_prod, 1);
4238 }
4239
4240 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4241 /* Check TX ring producer/consumer. */
4242 bge_txeof(sc, tx_cons);
4243 }
4244
4245 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4246 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4247 bge_start_locked(ifp);
4248
4249 BGE_UNLOCK(sc);
4250}
4251
4252static void
4253bge_asf_driver_up(struct bge_softc *sc)
4254{
4255 if (sc->bge_asf_mode & ASF_STACKUP) {
4256	/* Send an ASF heartbeat approx. every 2s. */
4257 if (sc->bge_asf_count)
4258	sc->bge_asf_count--;
4259 else {
4260 sc->bge_asf_count = 2;
4261 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4262 BGE_FW_CMD_DRV_ALIVE);
4263 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4264 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4265 BGE_FW_HB_TIMEOUT_SEC);
4266 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4267 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4268 BGE_RX_CPU_DRV_EVENT);
4269 }
4270 }
4271}
4272
4273static void
4274bge_tick(void *xsc)
4275{
4276 struct bge_softc *sc = xsc;
4277 struct mii_data *mii = NULL;
4278
4279 BGE_LOCK_ASSERT(sc);
4280
4281 /* Synchronize with possible callout reset/stop. */
4282 if (callout_pending(&sc->bge_stat_ch) ||
4283 !callout_active(&sc->bge_stat_ch))
4284 return;
4285
4286 if (BGE_IS_5705_PLUS(sc))
4287 bge_stats_update_regs(sc);
4288 else
4289 bge_stats_update(sc);
4290
4291 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4292 mii = device_get_softc(sc->bge_miibus);
4293 /*
4294	 * Do not touch the PHY if we have link up. This could break
4295	 * IPMI/ASF mode or produce extra input errors
4296	 * (extra errors were reported for bcm5701 & bcm5704).
4297 */
4298 if (!sc->bge_link)
4299 mii_tick(mii);
4300 } else {
4301 /*
4302	 * Since auto-polling can't be used in TBI mode, we have to poll
4303	 * the link status manually. Here we register a pending link event
4304	 * and trigger an interrupt.
4305 */
4306#ifdef DEVICE_POLLING
4307 /* In polling mode we poll link state in bge_poll(). */
4308 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4309#endif
4310 {
4311 sc->bge_link_evt++;
4312 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4313 sc->bge_flags & BGE_FLAG_5788)
4314 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4315 else
4316 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4317 }
4318 }
4319
4320 bge_asf_driver_up(sc);
4321 bge_watchdog(sc);
4322
4323 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4324}
4325
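/*
 * Update the software statistics from the individual MAC statistics
 * registers. bge_tick() uses this path on 5705-and-newer controllers and
 * the legacy statistics block (bge_stats_update()) on older ones.
 */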
4326static void
4327bge_stats_update_regs(struct bge_softc *sc)
4328{
4329 struct ifnet *ifp;
4330 struct bge_mac_stats *stats;
4331
4332 ifp = sc->bge_ifp;
4333 stats = &sc->bge_mac_stats;
4334
4335 stats->ifHCOutOctets +=
4336 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4337 stats->etherStatsCollisions +=
4338 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4339 stats->outXonSent +=
4340 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4341 stats->outXoffSent +=
4342 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4343 stats->dot3StatsInternalMacTransmitErrors +=
4344 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4345 stats->dot3StatsSingleCollisionFrames +=
4346 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4347 stats->dot3StatsMultipleCollisionFrames +=
4348 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4349 stats->dot3StatsDeferredTransmissions +=
4350 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4351 stats->dot3StatsExcessiveCollisions +=
4352 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4353 stats->dot3StatsLateCollisions +=
4354 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4355 stats->ifHCOutUcastPkts +=
4356 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4357 stats->ifHCOutMulticastPkts +=
4358 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4359 stats->ifHCOutBroadcastPkts +=
4360 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4361
4362 stats->ifHCInOctets +=
4363 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4364 stats->etherStatsFragments +=
4365 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4366 stats->ifHCInUcastPkts +=
4367 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4368 stats->ifHCInMulticastPkts +=
4369 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4370 stats->ifHCInBroadcastPkts +=
4371 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4372 stats->dot3StatsFCSErrors +=
4373 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4374 stats->dot3StatsAlignmentErrors +=
4375 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4376 stats->xonPauseFramesReceived +=
4377 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4378 stats->xoffPauseFramesReceived +=
4379 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4380 stats->macControlFramesReceived +=
4381 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4382 stats->xoffStateEntered +=
4383 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4384 stats->dot3StatsFramesTooLong +=
4385 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4386 stats->etherStatsJabbers +=
4387 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4388 stats->etherStatsUndersizePkts +=
4389 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4390
4391 stats->FramesDroppedDueToFilters +=
4392 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4393 stats->DmaWriteQueueFull +=
4394 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4395 stats->DmaWriteHighPriQueueFull +=
4396 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4397 stats->NoMoreRxBDs +=
4398 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4399 /*
4400 * XXX
4401	 * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS
4402	 * counter of the BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0
4403	 * includes the number of unwanted multicast frames. This comes
4404	 * from a silicon bug, and the known workaround to get a rough
4405	 * (not exact) counter is to enable an interrupt on MBUF low
4406	 * water attention. This can be accomplished by setting the
4407	 * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE, the
4408	 * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and the
4409	 * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL.
4410	 * However, that change would generate more interrupts and
4411	 * there would still be a chance of losing multiple frames
4412	 * during BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling.
4413	 * Given that the workaround would still not yield a correct
4414	 * counter, it does not seem worth implementing, so simply
4415	 * skip reading the counter on controllers that have the
4416	 * silicon bug.
4417 */
4418 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
4419 sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
4420 sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
4421 stats->InputDiscards +=
4422 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4423 stats->InputErrors +=
4424 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4425 stats->RecvThresholdHit +=
4426 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4427
4428 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4429 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4430 stats->InputErrors);
4431}
4432
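/*
 * The MAC statistics registers clear on read, so reading each register
 * once is enough to zero the hardware counters.
 */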
4433static void
4434bge_stats_clear_regs(struct bge_softc *sc)
4435{
4436
4437 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4438 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4439 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4440 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4441 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4442 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4443 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4444 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4445 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4446 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4447 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4448 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4449 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4450
4451 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4452 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4453 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4454 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4455 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4456 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4457 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4458 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4459 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4460 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4461 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4462 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4463 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4464 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4465
4466 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4467 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4468 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4469 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4470 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4471 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4472 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4473}
4474
4475static void
4476bge_stats_update(struct bge_softc *sc)
4477{
4478 struct ifnet *ifp;
4479 bus_size_t stats;
4480 uint32_t cnt; /* current register value */
4481
4482 ifp = sc->bge_ifp;
4483
4484 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4485
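/* Fetch one 32-bit counter from the statistics block in the NIC memory window. */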
4486#define READ_STAT(sc, stats, stat) \
4487 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4488
4489 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4490 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4491 sc->bge_tx_collisions = cnt;
4492
4493 cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
4494 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_nobds);
4495 sc->bge_rx_nobds = cnt;
4496 cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
4497 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_inerrs);
4498 sc->bge_rx_inerrs = cnt;
4499 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4500 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4501 sc->bge_rx_discards = cnt;
4502
4503 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4504 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4505 sc->bge_tx_discards = cnt;
4506
4507#undef READ_STAT
4508}
4509
4510/*
4511 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4512 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4513 * but when such padded frames employ the bge IP/TCP checksum offload,
4514 * the hardware checksum assist gives incorrect results (possibly
4515 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4516 * If we pad such runts with zeros, the onboard checksum comes out correct.
4517 */
4518static __inline int
4519bge_cksum_pad(struct mbuf *m)
4520{
4521 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4522 struct mbuf *last;
4523
4524 /* If there's only the packet-header and we can pad there, use it. */
4525 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4526 M_TRAILINGSPACE(m) >= padlen) {
4527 last = m;
4528 } else {
4529 /*
4530 * Walk packet chain to find last mbuf. We will either
4531 * pad there, or append a new mbuf and pad it.
4532 */
4533 for (last = m; last->m_next != NULL; last = last->m_next);
4534 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4535 /* Allocate new empty mbuf, pad it. Compact later. */
4536 struct mbuf *n;
4537
4538 MGET(n, M_DONTWAIT, MT_DATA);
4539 if (n == NULL)
4540 return (ENOBUFS);
4541 n->m_len = 0;
4542 last->m_next = n;
4543 last = n;
4544 }
4545 }
4546
4547 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4548 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4549 last->m_len += padlen;
4550 m->m_pkthdr.len += padlen;
4551
4552 return (0);
4553}
4554
4555static struct mbuf *
4556bge_check_short_dma(struct mbuf *m)
4557{
4558 struct mbuf *n;
4559 int found;
4560
4561 /*
4562	 * If the device receives two back-to-back send BDs with less than
4563	 * or equal to 8 total bytes then the device may hang. The two
4564	 * back-to-back send BDs must be in the same frame for this failure
4565	 * to occur. Scan the mbuf chain and check whether two such
4566	 * back-to-back send BDs are present. If they are, defragment the
4567	 * frame to work around the silicon bug.
4568 */
4569 for (n = m, found = 0; n != NULL; n = n->m_next) {
4570 if (n->m_len < 8) {
4571 found++;
4572 if (found > 1)
4573 break;
4574 continue;
4575 }
4576 found = 0;
4577 }
4578
4579 if (found > 1) {
4580 n = m_defrag(m, M_DONTWAIT);
4581 if (n == NULL)
4582 m_freem(m);
4583 } else
4584 n = m;
4585 return (n);
4586}
4587
4588static struct mbuf *
4589bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4590 uint16_t *flags)
4591{
4592 struct ip *ip;
4593 struct tcphdr *tcp;
4594 struct mbuf *n;
4595 uint16_t hlen;
4596 uint32_t poff;
4597
4598 if (M_WRITABLE(m) == 0) {
4599 /* Get a writable copy. */
4600 n = m_dup(m, M_DONTWAIT);
4601 m_freem(m);
4602 if (n == NULL)
4603 return (NULL);
4604 m = n;
4605 }
4606 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4607 if (m == NULL)
4608 return (NULL);
4609 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4610 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4611 m = m_pullup(m, poff + sizeof(struct tcphdr));
4612 if (m == NULL)
4613 return (NULL);
4614 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4615 m = m_pullup(m, poff + (tcp->th_off << 2));
4616 if (m == NULL)
4617 return (NULL);
4618 /*
4619	 * It seems the controller doesn't modify the IP length and TCP pseudo
4620	 * checksum. These checksums computed by the upper stack should be 0.
4621 */
4622 *mss = m->m_pkthdr.tso_segsz;
4623 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4624 ip->ip_sum = 0;
4625 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4626 /* Clear pseudo checksum computed by TCP stack. */
4627 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4628 tcp->th_sum = 0;
4629 /*
4630	 * Broadcom controllers use a different descriptor format for
4631	 * TSO depending on the ASIC revision. Due to the TSO-capable
4632	 * firmware license issue and the lower performance of firmware
4633	 * based TSO we only support hardware based TSO.
4634 */
4635 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4636 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4637 if (sc->bge_flags & BGE_FLAG_TSO3) {
4638 /*
4639 * For BCM5717 and newer controllers, hardware based TSO
4640 * uses the 14 lower bits of the bge_mss field to store the
4641 * MSS and the upper 2 bits to store the lowest 2 bits of
4642 * the IP/TCP header length. The upper 6 bits of the header
4643 * length are stored in the bge_flags[14:10,4] field. Jumbo
4644 * frames are supported.
4645 */
4646 *mss |= ((hlen & 0x3) << 14);
4647 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
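		/*
		 * For example, option-less 20-byte IP and TCP headers give
		 * hlen = (20 + 20) >> 2 = 10: bits 1:0 of hlen land in
		 * bge_mss[15:14] and bits 7:2 in the flags word as noted above.
		 */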
4648 } else {
4649 /*
4650 * For BCM5755 and newer controllers, hardware based TSO uses
4651 * the lower 11 bits to store the MSS and the upper 5 bits to
4652 * store the IP/TCP header length. Jumbo frames are not
4653 * supported.
4654 */
4655 *mss |= (hlen << 11);
4656 }
4657 return (m);
4658}
4659
4660/*
4661 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4662 * pointers to descriptors.
4663 */
4664static int
4665bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4666{
4667 bus_dma_segment_t segs[BGE_NSEG_NEW];
4668 bus_dmamap_t map;
4669 struct bge_tx_bd *d;
4670 struct mbuf *m = *m_head;
4671 uint32_t idx = *txidx;
4672 uint16_t csum_flags, mss, vlan_tag;
4673 int nsegs, i, error;
4674
4675 csum_flags = 0;
4676 mss = 0;
4677 vlan_tag = 0;
4678 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4679 m->m_next != NULL) {
4680 *m_head = bge_check_short_dma(m);
4681 if (*m_head == NULL)
4682 return (ENOBUFS);
4683 m = *m_head;
4684 }
4685 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4686 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4687 if (*m_head == NULL)
4688 return (ENOBUFS);
4689 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4690 BGE_TXBDFLAG_CPU_POST_DMA;
4691 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4692 if (m->m_pkthdr.csum_flags & CSUM_IP)
4693 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4694 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4695 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4696 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4697 (error = bge_cksum_pad(m)) != 0) {
4698 m_freem(m);
4699 *m_head = NULL;
4700 return (error);
4701 }
4702 }
4703 if (m->m_flags & M_LASTFRAG)
4704 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4705 else if (m->m_flags & M_FRAG)
4706 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4707 }
4708
4709 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4710 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4711 m->m_pkthdr.len > ETHER_MAX_LEN)
4712 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4713 if (sc->bge_forced_collapse > 0 &&
4714 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4715 /*
4716	 * Forcibly collapse mbuf chains to overcome a hardware
4717	 * limitation which only supports a single outstanding
4718	 * DMA read operation.
4719 */
4720 if (sc->bge_forced_collapse == 1)
4721 m = m_defrag(m, M_DONTWAIT);
4722 else
4723 m = m_collapse(m, M_DONTWAIT,
4724 sc->bge_forced_collapse);
4725 if (m == NULL)
4726 m = *m_head;
4727 *m_head = m;
4728 }
4729 }
4730
4731 map = sc->bge_cdata.bge_tx_dmamap[idx];
4732 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4733 &nsegs, BUS_DMA_NOWAIT);
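	/*
	 * EFBIG means the mbuf chain maps to more segments than BGE_NSEG_NEW;
	 * collapse the chain and retry the DMA mapping once.
	 */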
4734 if (error == EFBIG) {
4735 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4736 if (m == NULL) {
4737 m_freem(*m_head);
4738 *m_head = NULL;
4739 return (ENOBUFS);
4740 }
4741 *m_head = m;
4742 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4743 m, segs, &nsegs, BUS_DMA_NOWAIT);
4744 if (error) {
4745 m_freem(m);
4746 *m_head = NULL;
4747 return (error);
4748 }
4749 } else if (error != 0)
4750 return (error);
4751
4752 /* Check if we have enough free send BDs. */
4753 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4754 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4755 return (ENOBUFS);
4756 }
4757
4758 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4759
4760 if (m->m_flags & M_VLANTAG) {
4761 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4762 vlan_tag = m->m_pkthdr.ether_vtag;
4763 }
4764 for (i = 0; ; i++) {
4765 d = &sc->bge_ldata.bge_tx_ring[idx];
4766 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4767 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4768 d->bge_len = segs[i].ds_len;
4769 d->bge_flags = csum_flags;
4770 d->bge_vlan_tag = vlan_tag;
4771 d->bge_mss = mss;
4772 if (i == nsegs - 1)
4773 break;
4774 BGE_INC(idx, BGE_TX_RING_CNT);
4775 }
4776
4777 /* Mark the last segment as end of packet... */
4778 d->bge_flags |= BGE_TXBDFLAG_END;
4779
4780 /*
4781	 * Ensure that the map for this transmission
4782 * is placed at the array index of the last descriptor
4783 * in this chain.
4784 */
4785 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4786 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4787 sc->bge_cdata.bge_tx_chain[idx] = m;
4788 sc->bge_txcnt += nsegs;
4789
4790 BGE_INC(idx, BGE_TX_RING_CNT);
4791 *txidx = idx;
4792
4793 return (0);
4794}
4795
4796/*
4797 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4798 * to the mbuf data regions directly in the transmit descriptors.
4799 */
4800static void
4801bge_start_locked(struct ifnet *ifp)
4802{
4803 struct bge_softc *sc;
4804 struct mbuf *m_head;
4805 uint32_t prodidx;
4806 int count;
4807
4808 sc = ifp->if_softc;
4809 BGE_LOCK_ASSERT(sc);
4810
4811 if (!sc->bge_link ||
4812 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4813 IFF_DRV_RUNNING)
4814 return;
4815
4816 prodidx = sc->bge_tx_prodidx;
4817
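	/*
	 * Dequeue frames until the send queue is empty or fewer than 16 TX
	 * descriptors remain free; bge_encap() still verifies the exact
	 * segment count for each frame.
	 */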
4818 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4819 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4820 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4821 break;
4822 }
4823 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4824 if (m_head == NULL)
4825 break;
4826
4827 /*
4828 * XXX
4829 * The code inside the if() block is never reached since we
4830 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4831 * requests to checksum TCP/UDP in a fragmented packet.
4832 *
4833 * XXX
4834 * safety overkill. If this is a fragmented packet chain
4835 * with delayed TCP/UDP checksums, then only encapsulate
4836 * it if we have enough descriptors to handle the entire
4837 * chain at once.
4838 * (paranoia -- may not actually be needed)
4839 */
4840 if (m_head->m_flags & M_FIRSTFRAG &&
4841 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4842 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4843 m_head->m_pkthdr.csum_data + 16) {
4844 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4845 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4846 break;
4847 }
4848 }
4849
4850 /*
4851 * Pack the data into the transmit ring. If we
4852 * don't have room, set the OACTIVE flag and wait
4853 * for the NIC to drain the ring.
4854 */
4855 if (bge_encap(sc, &m_head, &prodidx)) {
4856 if (m_head == NULL)
4857 break;
4858 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4859 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4860 break;
4861 }
4862 ++count;
4863
4864 /*
4865 * If there's a BPF listener, bounce a copy of this frame
4866 * to him.
4867 */
4868#ifdef ETHER_BPF_MTAP
4869 ETHER_BPF_MTAP(ifp, m_head);
4870#else
4871 BPF_MTAP(ifp, m_head);
4872#endif
4873 }
4874
4875 if (count > 0) {
4876 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4877 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4878 /* Transmit. */
4879 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4880 /* 5700 b2 errata */
4881 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4882 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4883
4884 sc->bge_tx_prodidx = prodidx;
4885
4886 /*
4887 * Set a timeout in case the chip goes out to lunch.
4888 */
4889 sc->bge_timer = 5;
4890 }
4891}
4892
4893/*
4894 * Wrapper around bge_start_locked(): acquire the driver lock, run the
4895 * locked transmit routine above, and release the lock.
4896 */
4897static void
4898bge_start(struct ifnet *ifp)
4899{
4900 struct bge_softc *sc;
4901
4902 sc = ifp->if_softc;
4903 BGE_LOCK(sc);
4904 bge_start_locked(ifp);
4905 BGE_UNLOCK(sc);
4906}
4907
4908static void
4909bge_init_locked(struct bge_softc *sc)
4910{
4911 struct ifnet *ifp;
4912 uint16_t *m;
4913 uint32_t mode;
4914
4915 BGE_LOCK_ASSERT(sc);
4916
4917 ifp = sc->bge_ifp;
4918
4919 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4920 return;
4921
4922 /* Cancel pending I/O and flush buffers. */
4923 bge_stop(sc);
4924
4925 bge_stop_fw(sc);
4926 bge_sig_pre_reset(sc, BGE_RESET_START);
4927 bge_reset(sc);
4928 bge_sig_legacy(sc, BGE_RESET_START);
4929 bge_sig_post_reset(sc, BGE_RESET_START);
4930
4931 bge_chipinit(sc);
4932
4933 /*
4934 * Init the various state machines, ring
4935 * control blocks and firmware.
4936 */
4937 if (bge_blockinit(sc)) {
4938 device_printf(sc->bge_dev, "initialization failure\n");
4939 return;
4940 }
4941
4942 ifp = sc->bge_ifp;
4943
4944 /* Specify MTU. */
4945 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4946 ETHER_HDR_LEN + ETHER_CRC_LEN +
4947 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4948
4949 /* Load our MAC address. */
4950 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4951 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4952 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4953
4954 /* Program promiscuous mode. */
4955 bge_setpromisc(sc);
4956
4957 /* Program multicast filter. */
4958 bge_setmulti(sc);
4959
4960 /* Program VLAN tag stripping. */
4961 bge_setvlan(sc);
4962
4963 /* Override UDP checksum offloading. */
4964 if (sc->bge_forced_udpcsum == 0)
4965 sc->bge_csum_features &= ~CSUM_UDP;
4966 else
4967 sc->bge_csum_features |= CSUM_UDP;
4968 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4969 ifp->if_capenable & IFCAP_TXCSUM) {
4970 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4971 ifp->if_hwassist |= sc->bge_csum_features;
4972 }
4973
4974 /* Init RX ring. */
4975 if (bge_init_rx_ring_std(sc) != 0) {
4976 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4977 bge_stop(sc);
4978 return;
4979 }
4980
4981 /*
4982	 * Workaround for a bug in the 5705 ASIC rev A0. Poll the NIC's
4983	 * memory to ensure that the chip has in fact read the first
4984 * entry of the ring.
4985 */
4986 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4987 uint32_t v, i;
4988 for (i = 0; i < 10; i++) {
4989 DELAY(20);
4990 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4991 if (v == (MCLBYTES - ETHER_ALIGN))
4992 break;
4993 }
4994 if (i == 10)
4995 device_printf (sc->bge_dev,
4996 "5705 A0 chip failed to load RX ring\n");
4997 }
4998
4999 /* Init jumbo RX ring. */
5000 if (BGE_IS_JUMBO_CAPABLE(sc) &&
5001 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
5002 (MCLBYTES - ETHER_ALIGN)) {
5003 if (bge_init_rx_ring_jumbo(sc) != 0) {
5004 device_printf(sc->bge_dev,
5005 "no memory for jumbo Rx buffers.\n");
5006 bge_stop(sc);
5007 return;
5008 }
5009 }
5010
5011 /* Init our RX return ring index. */
5012 sc->bge_rx_saved_considx = 0;
5013
5014 /* Init our RX/TX stat counters. */
5015 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
5016
5017 /* Init TX ring. */
5018 bge_init_tx_ring(sc);
5019
5020 /* Enable TX MAC state machine lockup fix. */
5021 mode = CSR_READ_4(sc, BGE_TX_MODE);
5022 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
5023 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
5024 if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
5025 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5026 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
5027 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5028 }
5029 /* Turn on transmitter. */
5030 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
5031 DELAY(100);
5032
5033 /* Turn on receiver. */
5034 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5035 DELAY(10);
5036
5037 /*
5038 * Set the number of good frames to receive after RX MBUF
5039 * Low Watermark has been reached. After the RX MAC receives
5040 * this number of frames, it will drop subsequent incoming
5041 * frames until the MBUF High Watermark is reached.
5042 */
5043 if (sc->bge_asicrev == BGE_ASICREV_BCM57765)
5044 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
5045 else
5046 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
5047
5048 /* Clear MAC statistics. */
5049 if (BGE_IS_5705_PLUS(sc))
5050 bge_stats_clear_regs(sc);
5051
5052 /* Tell firmware we're alive. */
5053 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5054
5055#ifdef DEVICE_POLLING
5056 /* Disable interrupts if we are polling. */
5057 if (ifp->if_capenable & IFCAP_POLLING) {
5058 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5059 BGE_PCIMISCCTL_MASK_PCI_INTR);
5060 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5061 } else
5062#endif
5063
5064 /* Enable host interrupts. */
5065 {
5066 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
5067 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5068 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5069 }
5070
5071 ifp->if_drv_flags |= IFF_DRV_RUNNING;
5072 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5073
5074 bge_ifmedia_upd_locked(ifp);
5075
5076 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
5077}
5078
5079static void
5080bge_init(void *xsc)
5081{
5082 struct bge_softc *sc = xsc;
5083
5084 BGE_LOCK(sc);
5085 bge_init_locked(sc);
5086 BGE_UNLOCK(sc);
5087}
5088
5089/*
5090 * Set media options.
5091 */
5092static int
5093bge_ifmedia_upd(struct ifnet *ifp)
5094{
5095 struct bge_softc *sc = ifp->if_softc;
5096 int res;
5097
5098 BGE_LOCK(sc);
5099 res = bge_ifmedia_upd_locked(ifp);
5100 BGE_UNLOCK(sc);
5101
5102 return (res);
5103}
5104
5105static int
5106bge_ifmedia_upd_locked(struct ifnet *ifp)
5107{
5108 struct bge_softc *sc = ifp->if_softc;
5109 struct mii_data *mii;
5110 struct mii_softc *miisc;
5111 struct ifmedia *ifm;
5112
5113 BGE_LOCK_ASSERT(sc);
5114
5115 ifm = &sc->bge_ifmedia;
5116
5117 /* If this is a 1000baseX NIC, enable the TBI port. */
5118 if (sc->bge_flags & BGE_FLAG_TBI) {
5119 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5120 return (EINVAL);
5121 switch(IFM_SUBTYPE(ifm->ifm_media)) {
5122 case IFM_AUTO:
5123 /*
5124 * The BCM5704 ASIC appears to have a special
5125 * mechanism for programming the autoneg
5126 * advertisement registers in TBI mode.
5127 */
5128 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
5129 uint32_t sgdig;
5130 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
5131 if (sgdig & BGE_SGDIGSTS_DONE) {
5132 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
5133 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
5134 sgdig |= BGE_SGDIGCFG_AUTO |
5135 BGE_SGDIGCFG_PAUSE_CAP |
5136 BGE_SGDIGCFG_ASYM_PAUSE;
5137 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
5138 sgdig | BGE_SGDIGCFG_SEND);
5139 DELAY(5);
5140 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
5141 }
5142 }
5143 break;
5144 case IFM_1000_SX:
5145 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
5146 BGE_CLRBIT(sc, BGE_MAC_MODE,
5147 BGE_MACMODE_HALF_DUPLEX);
5148 } else {
5149 BGE_SETBIT(sc, BGE_MAC_MODE,
5150 BGE_MACMODE_HALF_DUPLEX);
5151 }
5152 DELAY(40);
5153 break;
5154 default:
5155 return (EINVAL);
5156 }
5157 return (0);
5158 }
5159
5160 sc->bge_link_evt++;
5161 mii = device_get_softc(sc->bge_miibus);
5162 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5163 PHY_RESET(miisc);
5164 mii_mediachg(mii);
5165
5166 /*
5167 * Force an interrupt so that we will call bge_link_upd
5168 * if needed and clear any pending link state attention.
5169	 * Without this we would not get any further interrupts
5170	 * for link state changes and thus could neither bring the
5171	 * link UP nor transmit in bge_start_locked. The only other
5172	 * way to get things working was to receive a packet and
5173	 * get an RX intr.
5174	 * bge_tick should help for fiber cards and we might not
5175	 * need to do this here if BGE_FLAG_TBI is set, but since
5176	 * we poll for fiber anyway it does no harm.
5177 */
5178 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
5179 sc->bge_flags & BGE_FLAG_5788)
5180 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
5181 else
5182 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
5183
5184 return (0);
5185}
5186
5187/*
5188 * Report current media status.
5189 */
5190static void
5191bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5192{
5193 struct bge_softc *sc = ifp->if_softc;
5194 struct mii_data *mii;
5195
5196 BGE_LOCK(sc);
5197
5198 if (sc->bge_flags & BGE_FLAG_TBI) {
5199 ifmr->ifm_status = IFM_AVALID;
5200 ifmr->ifm_active = IFM_ETHER;
5201 if (CSR_READ_4(sc, BGE_MAC_STS) &
5202 BGE_MACSTAT_TBI_PCS_SYNCHED)
5203 ifmr->ifm_status |= IFM_ACTIVE;
5204 else {
5205 ifmr->ifm_active |= IFM_NONE;
5206 BGE_UNLOCK(sc);
5207 return;
5208 }
5209 ifmr->ifm_active |= IFM_1000_SX;
5210 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5211 ifmr->ifm_active |= IFM_HDX;
5212 else
5213 ifmr->ifm_active |= IFM_FDX;
5214 BGE_UNLOCK(sc);
5215 return;
5216 }
5217
5218 mii = device_get_softc(sc->bge_miibus);
5219 mii_pollstat(mii);
5220 ifmr->ifm_active = mii->mii_media_active;
5221 ifmr->ifm_status = mii->mii_media_status;
5222
5223 BGE_UNLOCK(sc);
5224}
5225
5226static int
5227bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5228{
5229 struct bge_softc *sc = ifp->if_softc;
5230 struct ifreq *ifr = (struct ifreq *) data;
5231 struct mii_data *mii;
5232 int flags, mask, error = 0;
5233
5234 switch (command) {
5235 case SIOCSIFMTU:
5236 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5237 (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5238 if (ifr->ifr_mtu < ETHERMIN ||
5239 ifr->ifr_mtu > BGE_JUMBO_MTU) {
5240 error = EINVAL;
5241 break;
5242 }
5243 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5244 error = EINVAL;
5245 break;
5246 }
5247 BGE_LOCK(sc);
5248 if (ifp->if_mtu != ifr->ifr_mtu) {
5249 ifp->if_mtu = ifr->ifr_mtu;
5250 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5251 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5252 bge_init_locked(sc);
5253 }
5254 }
5255 BGE_UNLOCK(sc);
5256 break;
5257 case SIOCSIFFLAGS:
5258 BGE_LOCK(sc);
5259 if (ifp->if_flags & IFF_UP) {
5260 /*
5261 * If only the state of the PROMISC flag changed,
5262 * then just use the 'set promisc mode' command
5263 * instead of reinitializing the entire NIC. Doing
5264 * a full re-init means reloading the firmware and
5265 * waiting for it to start up, which may take a
5266 * second or two. Similarly for ALLMULTI.
5267 */
5268 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5269 flags = ifp->if_flags ^ sc->bge_if_flags;
5270 if (flags & IFF_PROMISC)
5271 bge_setpromisc(sc);
5272 if (flags & IFF_ALLMULTI)
5273 bge_setmulti(sc);
5274 } else
5275 bge_init_locked(sc);
5276 } else {
5277 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5278 bge_stop(sc);
5279 }
5280 }
5281 sc->bge_if_flags = ifp->if_flags;
5282 BGE_UNLOCK(sc);
5283 error = 0;
5284 break;
5285 case SIOCADDMULTI:
5286 case SIOCDELMULTI:
5287 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5288 BGE_LOCK(sc);
5289 bge_setmulti(sc);
5290 BGE_UNLOCK(sc);
5291 error = 0;
5292 }
5293 break;
5294 case SIOCSIFMEDIA:
5295 case SIOCGIFMEDIA:
5296 if (sc->bge_flags & BGE_FLAG_TBI) {
5297 error = ifmedia_ioctl(ifp, ifr,
5298 &sc->bge_ifmedia, command);
5299 } else {
5300 mii = device_get_softc(sc->bge_miibus);
5301 error = ifmedia_ioctl(ifp, ifr,
5302 &mii->mii_media, command);
5303 }
5304 break;
5305 case SIOCSIFCAP:
5306 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5307#ifdef DEVICE_POLLING
5308 if (mask & IFCAP_POLLING) {
5309 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5310 error = ether_poll_register(bge_poll, ifp);
5311 if (error)
5312 return (error);
5313 BGE_LOCK(sc);
5314 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5315 BGE_PCIMISCCTL_MASK_PCI_INTR);
5316 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5317 ifp->if_capenable |= IFCAP_POLLING;
5318 BGE_UNLOCK(sc);
5319 } else {
5320 error = ether_poll_deregister(ifp);
5321 /* Enable interrupt even in error case */
5322 BGE_LOCK(sc);
5323 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5324 BGE_PCIMISCCTL_MASK_PCI_INTR);
5325 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5326 ifp->if_capenable &= ~IFCAP_POLLING;
5327 BGE_UNLOCK(sc);
5328 }
5329 }
5330#endif
5331 if ((mask & IFCAP_TXCSUM) != 0 &&
5332 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5333 ifp->if_capenable ^= IFCAP_TXCSUM;
5334 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5335 ifp->if_hwassist |= sc->bge_csum_features;
5336 else
5337 ifp->if_hwassist &= ~sc->bge_csum_features;
5338 }
5339
5340 if ((mask & IFCAP_RXCSUM) != 0 &&
5341 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5342 ifp->if_capenable ^= IFCAP_RXCSUM;
5343
5344 if ((mask & IFCAP_TSO4) != 0 &&
5345 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5346 ifp->if_capenable ^= IFCAP_TSO4;
5347 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5348 ifp->if_hwassist |= CSUM_TSO;
5349 else
5350 ifp->if_hwassist &= ~CSUM_TSO;
5351 }
5352
5353 if (mask & IFCAP_VLAN_MTU) {
5354 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5355 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5356 bge_init(sc);
5357 }
5358
5359 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5360 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5361 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5362 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5363 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5364 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5365 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5366 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5367 BGE_LOCK(sc);
5368 bge_setvlan(sc);
5369 BGE_UNLOCK(sc);
5370 }
5371#ifdef VLAN_CAPABILITIES
5372 VLAN_CAPABILITIES(ifp);
5373#endif
5374 break;
5375 default:
5376 error = ether_ioctl(ifp, command, data);
5377 break;
5378 }
5379
5380 return (error);
5381}
5382
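/*
 * Transmit watchdog: if the timer armed in bge_start_locked() expires
 * before bge_txeof() sees the frames complete, log a timeout,
 * reinitialize the chip and count an output error.
 */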
5383static void
5384bge_watchdog(struct bge_softc *sc)
5385{
5386 struct ifnet *ifp;
5387
5388 BGE_LOCK_ASSERT(sc);
5389
5390 if (sc->bge_timer == 0 || --sc->bge_timer)
5391 return;
5392
5393 ifp = sc->bge_ifp;
5394
5395 if_printf(ifp, "watchdog timeout -- resetting\n");
5396
5397 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5398 bge_init_locked(sc);
5399
5400 ifp->if_oerrors++;
5401}
5402
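/*
 * Helper for bge_stop(): clear the enable bit in the given block's mode
 * register and poll, up to BGE_TIMEOUT times with a 100us delay, until
 * the hardware reports that the block has stopped.
 */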
5403static void
5404bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5405{
5406 int i;
5407
5408 BGE_CLRBIT(sc, reg, bit);
5409
5410 for (i = 0; i < BGE_TIMEOUT; i++) {
5411 if ((CSR_READ_4(sc, reg) & bit) == 0)
5412 return;
5413 DELAY(100);
5414 }
5415}
5416
5417/*
5418 * Stop the adapter and free any mbufs allocated to the
5419 * RX and TX lists.
5420 */
5421static void
5422bge_stop(struct bge_softc *sc)
5423{
5424 struct ifnet *ifp;
5425
5426 BGE_LOCK_ASSERT(sc);
5427
5428 ifp = sc->bge_ifp;
5429
5430 callout_stop(&sc->bge_stat_ch);
5431
5432 /* Disable host interrupts. */
5433 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5434 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5435
5436 /*
5437 * Tell firmware we're shutting down.
5438 */
5439 bge_stop_fw(sc);
5440 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5441
5442 /*
5443 * Disable all of the receiver blocks.
5444 */
5445 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5446 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5447 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5448 if (BGE_IS_5700_FAMILY(sc))
5449 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5450 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5451 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5452 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5453
5454 /*
5455 * Disable all of the transmit blocks.
5456 */
5457 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5458 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5459 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5460 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5461 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5462 if (BGE_IS_5700_FAMILY(sc))
5463 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5464 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5465
5466 /*
5467 * Shut down all of the memory managers and related
5468 * state machines.
5469 */
5470 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5471 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5472 if (BGE_IS_5700_FAMILY(sc))
5473 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5474
5475 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5476 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5477 if (!(BGE_IS_5705_PLUS(sc))) {
5478 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5479 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5480 }
5481 /* Update MAC statistics. */
5482 if (BGE_IS_5705_PLUS(sc))
5483 bge_stats_update_regs(sc);
5484
5485 bge_reset(sc);
5486 bge_sig_legacy(sc, BGE_RESET_STOP);
5487 bge_sig_post_reset(sc, BGE_RESET_STOP);
5488
5489 /*
5490 * Keep the ASF firmware running if up.
5491 */
5492 if (sc->bge_asf_mode & ASF_STACKUP)
5493 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5494 else
5495 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5496
5497 /* Free the RX lists. */
5498 bge_free_rx_ring_std(sc);
5499
5500 /* Free jumbo RX list. */
5501 if (BGE_IS_JUMBO_CAPABLE(sc))
5502 bge_free_rx_ring_jumbo(sc);
5503
5504 /* Free TX buffers. */
5505 bge_free_tx_ring(sc);
5506
5507 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5508
5509 /* Clear MAC's link state (PHY may still have link UP). */
5510 if (bootverbose && sc->bge_link)
5511 if_printf(sc->bge_ifp, "link DOWN\n");
5512 sc->bge_link = 0;
5513
5514 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5515}
5516
5517/*
5518 * Stop all chip I/O so that the kernel's probe routines don't
5519 * get confused by errant DMAs when rebooting.
5520 */
5521static int
5522bge_shutdown(device_t dev)
5523{
5524 struct bge_softc *sc;
5525
5526 sc = device_get_softc(dev);
5527 BGE_LOCK(sc);
5528 bge_stop(sc);
5529 bge_reset(sc);
5530 BGE_UNLOCK(sc);
5531
5532 return (0);
5533}
5534
5535static int
5536bge_suspend(device_t dev)
5537{
5538 struct bge_softc *sc;
5539
5540 sc = device_get_softc(dev);
5541 BGE_LOCK(sc);
5542 bge_stop(sc);
5543 BGE_UNLOCK(sc);
5544
5545 return (0);
5546}
5547
5548static int
5549bge_resume(device_t dev)
5550{
5551 struct bge_softc *sc;
5552 struct ifnet *ifp;
5553
5554 sc = device_get_softc(dev);
5555 BGE_LOCK(sc);
5556 ifp = sc->bge_ifp;
5557 if (ifp->if_flags & IFF_UP) {
5558 bge_init_locked(sc);
5559 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5560 bge_start_locked(ifp);
5561 }
5562 BGE_UNLOCK(sc);
5563
5564 return (0);
5565}
5566
5567static void
5568bge_link_upd(struct bge_softc *sc)
5569{
5570 struct mii_data *mii;
5571 uint32_t link, status;
5572
5573 BGE_LOCK_ASSERT(sc);
5574
5575 /* Clear 'pending link event' flag. */
5576 sc->bge_link_evt = 0;
5577
5578 /*
5579 * Process link state changes.
5580 * Grrr. The link status word in the status block does
5581 * not work correctly on the BCM5700 rev AX and BX chips,
5582 * according to all available information. Hence, we have
5583 * to enable MII interrupts in order to properly obtain
5584 * async link changes. Unfortunately, this also means that
5585 * we have to read the MAC status register to detect link
5586 * changes, thereby adding an additional register access to
5587 * the interrupt handler.
5588 *
5589	 * XXX: perhaps the link state detection procedure used for
5590	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5591 */
5592
5593 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5594 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5595 status = CSR_READ_4(sc, BGE_MAC_STS);
5596 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5597 mii = device_get_softc(sc->bge_miibus);
5598 mii_pollstat(mii);
5599 if (!sc->bge_link &&
5600 mii->mii_media_status & IFM_ACTIVE &&
5601 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5602 sc->bge_link++;
5603 if (bootverbose)
5604 if_printf(sc->bge_ifp, "link UP\n");
5605 } else if (sc->bge_link &&
5606 (!(mii->mii_media_status & IFM_ACTIVE) ||
5607 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5608 sc->bge_link = 0;
5609 if (bootverbose)
5610 if_printf(sc->bge_ifp, "link DOWN\n");
5611 }
5612
5613 /* Clear the interrupt. */
5614 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5615 BGE_EVTENB_MI_INTERRUPT);
5616 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5617 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5618 BRGPHY_INTRS);
5619 }
5620 return;
5621 }
5622
5623 if (sc->bge_flags & BGE_FLAG_TBI) {
5624 status = CSR_READ_4(sc, BGE_MAC_STS);
5625 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5626 if (!sc->bge_link) {
5627 sc->bge_link++;
5628 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
5629 BGE_CLRBIT(sc, BGE_MAC_MODE,
5630 BGE_MACMODE_TBI_SEND_CFGS);
5631 DELAY(40);
5632 }
5633 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5634 if (bootverbose)
5635 if_printf(sc->bge_ifp, "link UP\n");
5636 if_link_state_change(sc->bge_ifp,
5637 LINK_STATE_UP);
5638 }
5639 } else if (sc->bge_link) {
5640 sc->bge_link = 0;
5641 if (bootverbose)
5642 if_printf(sc->bge_ifp, "link DOWN\n");
5643 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5644 }
5645 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5646 /*
5647		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
5648		 * bit in the status word always set. Work around this bug by
5649		 * reading the PHY link status directly.
5650 */
5651 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5652
5653 if (link != sc->bge_link ||
5654 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5655 mii = device_get_softc(sc->bge_miibus);
5656 mii_pollstat(mii);
5657 if (!sc->bge_link &&
5658 mii->mii_media_status & IFM_ACTIVE &&
5659 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5660 sc->bge_link++;
5661 if (bootverbose)
5662 if_printf(sc->bge_ifp, "link UP\n");
5663 } else if (sc->bge_link &&
5664 (!(mii->mii_media_status & IFM_ACTIVE) ||
5665 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5666 sc->bge_link = 0;
5667 if (bootverbose)
5668 if_printf(sc->bge_ifp, "link DOWN\n");
5669 }
5670 }
5671 } else {
5672 /*
5673 * For controllers that call mii_tick, we have to poll
5674 * link status.
5675 */
5676 mii = device_get_softc(sc->bge_miibus);
5677 mii_pollstat(mii);
5678 bge_miibus_statchg(sc->bge_dev);
5679 }
5680
5681 /* Clear the attention. */
5682 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5683 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5684 BGE_MACSTAT_LINK_CHANGED);
5685}
5686
5687static void
5688bge_add_sysctls(struct bge_softc *sc)
5689{
5690 struct sysctl_ctx_list *ctx;
5691 struct sysctl_oid_list *children;
5692 char tn[32];
5693 int unit;
5694
5695 ctx = device_get_sysctl_ctx(sc->bge_dev);
5696 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5697
5698#ifdef BGE_REGISTER_DEBUG
5699 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5700 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5701 "Debug Information");
5702
5703 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5704 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5705 "Register Read");
5706
5707 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5708 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5709 "Memory Read");
5710
5711#endif
5712
5713 unit = device_get_unit(sc->bge_dev);
5714 /*
5715 * A common design characteristic for many Broadcom client controllers
5716 * is that they only support a single outstanding DMA read operation
5717 * on the PCIe bus. This means that it will take twice as long to fetch
5718 * a TX frame that is split into header and payload buffers as it does
5719 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5720 * these controllers, coalescing buffers to reduce the number of memory
5721	 * reads is an effective way to get maximum performance (about 940Mbps).
5722	 * Without collapsing TX buffers the maximum TCP bulk transfer
5723	 * performance is about 850Mbps. However, forcibly coalescing mbufs
5724	 * consumes a lot of CPU cycles, so leave it off by default.
5725 */
5726 sc->bge_forced_collapse = 0;
5727 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5728 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5729 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5730 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5731 "Number of fragmented TX buffers of a frame allowed before "
5732 "forced collapsing");
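	/*
	 * Usage note: setting dev.bge.N.forced_collapse to 1 makes
	 * bge_encap() defragment every multi-mbuf frame with m_defrag();
	 * larger values use m_collapse() to limit the chain to that many
	 * fragments.
	 */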
5733
5734 sc->bge_msi = 1;
5735 snprintf(tn, sizeof(tn), "dev.bge.%d.msi", unit);
5736 TUNABLE_INT_FETCH(tn, &sc->bge_msi);
5737 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi",
5738 CTLFLAG_RD, &sc->bge_msi, 0, "Enable MSI");
5739
5740 /*
5741 * It seems all Broadcom controllers have a bug that can generate UDP
5742 * datagrams with checksum value 0 when TX UDP checksum offloading is
5743	 * enabled. Generating a UDP checksum value of 0 is an RFC 768
5744	 * violation. Even though the probability of generating such UDP
5745	 * datagrams is low, I don't want to see FreeBSD boxes inject such
5746	 * datagrams into the network, so disable UDP checksum offloading by
5747	 * default. Users can still override this behavior by setting the
5748	 * sysctl variable dev.bge.0.forced_udpcsum.
5749 */
5750 sc->bge_forced_udpcsum = 0;
5751 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5752 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5753 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5754 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5755 "Enable UDP checksum offloading even if controller can "
5756 "generate UDP checksum value 0");
5757
5758 if (BGE_IS_5705_PLUS(sc))
5759 bge_add_sysctl_stats_regs(sc, ctx, children);
5760 else
5761 bge_add_sysctl_stats(sc, ctx, children);
5762}
5763
5764#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5765 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5766 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5767 desc)
5768
5769static void
5770bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5771 struct sysctl_oid_list *parent)
5772{
5773 struct sysctl_oid *tree;
5774 struct sysctl_oid_list *children, *schildren;
5775
5776 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5777 NULL, "BGE Statistics");
5778 schildren = children = SYSCTL_CHILDREN(tree);
5779 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5780 children, COSFramesDroppedDueToFilters,
5781 "FramesDroppedDueToFilters");
5782 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5783 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5784 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5785 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5786 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5787 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5788 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5789 children, ifInDiscards, "InputDiscards");
5790 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5791 children, ifInErrors, "InputErrors");
5792 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5793 children, nicRecvThresholdHit, "RecvThresholdHit");
5794 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5795 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5796 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5797 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5798 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5799 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5800 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5801 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5802 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5803 children, nicRingStatusUpdate, "RingStatusUpdate");
5804 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5805 children, nicInterrupts, "Interrupts");
5806 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5807 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5808 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5809 children, nicSendThresholdHit, "SendThresholdHit");
5810
5811 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5812 NULL, "BGE RX Statistics");
5813 children = SYSCTL_CHILDREN(tree);
5814 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5815 children, rxstats.ifHCInOctets, "ifHCInOctets");
5816 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5817 children, rxstats.etherStatsFragments, "Fragments");
5818 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5819 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5820 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5821 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5822 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5823 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5824 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5825 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5826 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5827 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5828 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5829 children, rxstats.xoffPauseFramesReceived,
5830 "xoffPauseFramesReceived");
5831 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5832 children, rxstats.macControlFramesReceived,
5833 "ControlFramesReceived");
5834 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5835 children, rxstats.xoffStateEntered, "xoffStateEntered");
5836 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5837 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5838 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5839 children, rxstats.etherStatsJabbers, "Jabbers");
5840 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5841 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5842 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5843 children, rxstats.inRangeLengthError, "inRangeLengthError");
5844 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5845 children, rxstats.outRangeLengthError, "outRangeLengthError");
5846
5847 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5848 NULL, "BGE TX Statistics");
5849 children = SYSCTL_CHILDREN(tree);
5850 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5851 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5852 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5853 children, txstats.etherStatsCollisions, "Collisions");
5854 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5855 children, txstats.outXonSent, "XonSent");
5856 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5857 children, txstats.outXoffSent, "XoffSent");
5858 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5859 children, txstats.flowControlDone, "flowControlDone");
5860 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5861 children, txstats.dot3StatsInternalMacTransmitErrors,
5862 "InternalMacTransmitErrors");
5863 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5864 children, txstats.dot3StatsSingleCollisionFrames,
5865 "SingleCollisionFrames");
5866 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5867 children, txstats.dot3StatsMultipleCollisionFrames,
5868 "MultipleCollisionFrames");
5869 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5870 children, txstats.dot3StatsDeferredTransmissions,
5871 "DeferredTransmissions");
5872 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5873 children, txstats.dot3StatsExcessiveCollisions,
5874 "ExcessiveCollisions");
5875 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5876 children, txstats.dot3StatsLateCollisions,
5877 "LateCollisions");
5878 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5879 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5880 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5881 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5882 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5883 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5884 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5885 children, txstats.dot3StatsCarrierSenseErrors,
5886 "CarrierSenseErrors");
5887 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5888 children, txstats.ifOutDiscards, "Discards");
5889 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5890 children, txstats.ifOutErrors, "Errors");
5891}
5892
5893#undef BGE_SYSCTL_STAT
5894
5895#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5896 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5897
5898static void
5899bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5900 struct sysctl_oid_list *parent)
5901{
5902 struct sysctl_oid *tree;
5903 struct sysctl_oid_list *child, *schild;
5904 struct bge_mac_stats *stats;
5905
5906 stats = &sc->bge_mac_stats;
5907 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5908 NULL, "BGE Statistics");
5909 schild = child = SYSCTL_CHILDREN(tree);
5910 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5911 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5912 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5913 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5914 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5915 &stats->DmaWriteHighPriQueueFull,
5916 "NIC DMA Write High Priority Queue Full");
5917 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5918 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5919 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5920 &stats->InputDiscards, "Discarded Input Frames");
5921 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5922 &stats->InputErrors, "Input Errors");
5923 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5924 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5925
5926 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5927 NULL, "BGE RX Statistics");
5928 child = SYSCTL_CHILDREN(tree);
5929 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5930 &stats->ifHCInOctets, "Inbound Octets");
5931 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5932 &stats->etherStatsFragments, "Fragments");
5933 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5934 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5935 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5936 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5937 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5938 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5939 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5940 &stats->dot3StatsFCSErrors, "FCS Errors");
5941 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5942 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5943 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5944 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5945 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5946 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5947 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5948 &stats->macControlFramesReceived, "MAC Control Frames Received");
5949 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5950 &stats->xoffStateEntered, "XOFF State Entered");
5951 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5952 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5953 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5954 &stats->etherStatsJabbers, "Jabbers");
5955 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5956 &stats->etherStatsUndersizePkts, "Undersized Packets");
5957
5958 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5959 NULL, "BGE TX Statistics");
5960 child = SYSCTL_CHILDREN(tree);
5961 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5962 &stats->ifHCOutOctets, "Outbound Octets");
5963 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5964 &stats->etherStatsCollisions, "TX Collisions");
5965 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5966 &stats->outXonSent, "XON Sent");
5967 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5968 &stats->outXoffSent, "XOFF Sent");
5969 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5970 &stats->dot3StatsInternalMacTransmitErrors,
5971 "Internal MAC TX Errors");
5972 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5973 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5974 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5975 &stats->dot3StatsMultipleCollisionFrames,
5976 "Multiple Collision Frames");
5977 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5978 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5979 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5980 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5981 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5982 &stats->dot3StatsLateCollisions, "Late Collisions");
5983 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5984 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5985 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5986 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5987 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5988 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5989}
5990
5991#undef BGE_SYSCTL_STAT_ADD64
5992
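/*
 * Handler for the legacy statistics sysctls: fetch a single 32-bit
 * counter from the NIC's statistics block through the memory window.
 */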
5993static int
5994bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5995{
5996 struct bge_softc *sc;
5997 uint32_t result;
5998 int offset;
5999
6000 sc = (struct bge_softc *)arg1;
6001 offset = arg2;
6002 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
6003 offsetof(bge_hostaddr, bge_addr_lo));
6004 return (sysctl_handle_int(oidp, &result, 0, req));
6005}
6006
6007#ifdef BGE_REGISTER_DEBUG
6008static int
6009bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
6010{
6011 struct bge_softc *sc;
6012 uint16_t *sbdata;
6013 int error, result, sbsz;
6014 int i, j;
6015
6016 result = -1;
6017 error = sysctl_handle_int(oidp, &result, 0, req);
6018 if (error || (req->newptr == NULL))
6019 return (error);
6020
6021 if (result == 1) {
6022 sc = (struct bge_softc *)arg1;
6023
6024 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
6025 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
6026 sbsz = BGE_STATUS_BLK_SZ;
6027 else
6028 sbsz = 32;
6029 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
6030 printf("Status Block:\n");
6031 BGE_LOCK(sc);
6032 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
6033 sc->bge_cdata.bge_status_map,
6034 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
6035 for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
6036 printf("%06x:", i);
6037 for (j = 0; j < 8; j++)
6038 printf(" %04x", sbdata[i++]);
6039 printf("\n");
6040 }
6041
6042 printf("Registers:\n");
6043 for (i = 0x800; i < 0xA00; ) {
6044 printf("%06x:", i);
6045 for (j = 0; j < 8; j++) {
6046 printf(" %08x", CSR_READ_4(sc, i));
6047 i += 4;
6048 }
6049 printf("\n");
6050 }
6051 BGE_UNLOCK(sc);
6052
6053 printf("Hardware Flags:\n");
6054 if (BGE_IS_5717_PLUS(sc))
6055 printf(" - 5717 Plus\n");
6056 if (BGE_IS_5755_PLUS(sc))
6057 printf(" - 5755 Plus\n");
6058 if (BGE_IS_575X_PLUS(sc))
6059 printf(" - 575X Plus\n");
6060 if (BGE_IS_5705_PLUS(sc))
6061 printf(" - 5705 Plus\n");
6062 if (BGE_IS_5714_FAMILY(sc))
6063 printf(" - 5714 Family\n");
6064 if (BGE_IS_5700_FAMILY(sc))
6065 printf(" - 5700 Family\n");
6066 if (sc->bge_flags & BGE_FLAG_JUMBO)
6067 printf(" - Supports Jumbo Frames\n");
6068 if (sc->bge_flags & BGE_FLAG_PCIX)
6069 printf(" - PCI-X Bus\n");
6070 if (sc->bge_flags & BGE_FLAG_PCIE)
6071 printf(" - PCI Express Bus\n");
6072 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
6073 printf(" - No 3 LEDs\n");
6074 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
6075 printf(" - RX Alignment Bug\n");
6076 }
6077
6078 return (error);
6079}
6080
6081static int
6082bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
6083{
6084 struct bge_softc *sc;
6085 int error;
6086 uint16_t result;
6087 uint32_t val;
6088
6089 result = -1;
6090 error = sysctl_handle_int(oidp, &result, 0, req);
6091 if (error || (req->newptr == NULL))
6092 return (error);
6093
6094 if (result < 0x8000) {
6095 sc = (struct bge_softc *)arg1;
6096 val = CSR_READ_4(sc, result);
6097 printf("reg 0x%06X = 0x%08X\n", result, val);
6098 }
6099
6100 return (error);
6101}
6102
6103static int
6104bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
6105{
6106 struct bge_softc *sc;
6107 int error;
6108 uint16_t result;
6109 uint32_t val;
6110
6111 result = -1;
6112 error = sysctl_handle_int(oidp, &result, 0, req);
6113 if (error || (req->newptr == NULL))
6114 return (error);
6115
6116 if (result < 0x8000) {
6117 sc = (struct bge_softc *)arg1;
6118 val = bge_readmem_ind(sc, result);
6119 printf("mem 0x%06X = 0x%08X\n", result, val);
6120 }
6121
6122 return (error);
6123}
6124#endif
6125
6126static int
6127bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
6128{
6129
6130 if (sc->bge_flags & BGE_FLAG_EADDR)
6131 return (1);
6132
6133#ifdef __sparc64__
6134 OF_getetheraddr(sc->bge_dev, ether_addr);
6135 return (0);
6136#endif
6137 return (1);
6138}
6139
6140static int
6141bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
6142{
6143 uint32_t mac_addr;
6144
6145 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
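	/*
	 * 0x484b is ASCII "HK"; it is assumed here to be the signature the
	 * bootcode leaves in SRAM when a valid MAC address follows.
	 */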
6146 if ((mac_addr >> 16) == 0x484b) {
6147 ether_addr[0] = (uint8_t)(mac_addr >> 8);
6148 ether_addr[1] = (uint8_t)mac_addr;
6149 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
6150 ether_addr[2] = (uint8_t)(mac_addr >> 24);
6151 ether_addr[3] = (uint8_t)(mac_addr >> 16);
6152 ether_addr[4] = (uint8_t)(mac_addr >> 8);
6153 ether_addr[5] = (uint8_t)mac_addr;
6154 return (0);
6155 }
6156 return (1);
6157}
6158
6159static int
6160bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
6161{
6162 int mac_offset = BGE_EE_MAC_OFFSET;
6163
6164 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6165 mac_offset = BGE_EE_MAC_OFFSET_5906;
6166
6167 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
6168 ETHER_ADDR_LEN));
6169}
6170
6171static int
6172bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
6173{
6174
6175 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6176 return (1);
6177
6178 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
6179 ETHER_ADDR_LEN));
6180}
6181
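/*
 * Obtain the station address by trying each source in turn: firmware,
 * SRAM, NVRAM, then EEPROM. Returns ENXIO if none of them succeeds.
 */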
6182static int
6183bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
6184{
6185 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
6186 /* NOTE: Order is critical */
6187 bge_get_eaddr_fw,
6188 bge_get_eaddr_mem,
6189 bge_get_eaddr_nvram,
6190 bge_get_eaddr_eeprom,
6191 NULL
6192 };
6193 const bge_eaddr_fcn_t *func;
6194
6195 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6196 if ((*func)(sc, eaddr) == 0)
6197 break;
6198 }
6199 return (*func == NULL ? ENXIO : 0);
6200}
3653 /* Clear error status. */
3654 pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_STA,
3655 PCIEM_STA_CORRECTABLE_ERROR |
3656 PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
3657 PCIEM_STA_UNSUPPORTED_REQ, 2);
3658 }
3659
3660 /* Reset some of the PCI state that got zapped by reset. */
3661 pci_write_config(dev, BGE_PCI_MISC_CTL,
3662 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3663 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3664 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3665 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3666 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3667 /*
3668	 * Disable PCI-X relaxed ordering to ensure the status block update
3669	 * comes first, followed by the packet buffer DMA. Otherwise the
3670	 * driver may read a stale status block.
3671 */
3672 if (sc->bge_flags & BGE_FLAG_PCIX) {
3673 devctl = pci_read_config(dev,
3674 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3675 devctl &= ~PCIXM_COMMAND_ERO;
3676 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3677 devctl &= ~PCIXM_COMMAND_MAX_READ;
3678 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3679 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3680 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3681 PCIXM_COMMAND_MAX_READ);
3682 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3683 }
3684 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3685 devctl, 2);
3686 }
3687 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3688 if (BGE_IS_5714_FAMILY(sc)) {
3689 /* This chip disables MSI on reset. */
3690 if (sc->bge_flags & BGE_FLAG_MSI) {
3691 val = pci_read_config(dev,
3692 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3693 pci_write_config(dev,
3694 sc->bge_msicap + PCIR_MSI_CTRL,
3695 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3696 val = CSR_READ_4(sc, BGE_MSI_MODE);
3697 CSR_WRITE_4(sc, BGE_MSI_MODE,
3698 val | BGE_MSIMODE_ENABLE);
3699 }
3700 val = CSR_READ_4(sc, BGE_MARB_MODE);
3701 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3702 } else
3703 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3704
3705 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3706 for (i = 0; i < BGE_TIMEOUT; i++) {
3707 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3708 if (val & BGE_VCPU_STATUS_INIT_DONE)
3709 break;
3710 DELAY(100);
3711 }
3712 if (i == BGE_TIMEOUT) {
3713 device_printf(dev, "reset timed out\n");
3714 return (1);
3715 }
3716 } else {
3717 /*
3718 * Poll until we see the 1's complement of the magic number.
3719 * This indicates that the firmware initialization is complete.
3720 * We expect this to fail if no chip containing the Ethernet
3721 * address is fitted though.
3722 */
3723 for (i = 0; i < BGE_TIMEOUT; i++) {
3724 DELAY(10);
3725 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
3726 if (val == ~BGE_SRAM_FW_MB_MAGIC)
3727 break;
3728 }
3729
3730 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3731 device_printf(dev,
3732 "firmware handshake timed out, found 0x%08x\n",
3733 val);
3734		/* BCM57765 A0 needs additional time before its registers can be accessed. */
3735 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3736 DELAY(10 * 1000); /* XXX */
3737 }
3738
3739 /*
3740 * XXX Wait for the value of the PCISTATE register to
3741 * return to its original pre-reset state. This is a
3742 * fairly good indicator of reset completion. If we don't
3743 * wait for the reset to fully complete, trying to read
3744 * from the device's non-PCI registers may yield garbage
3745 * results.
3746 */
3747 for (i = 0; i < BGE_TIMEOUT; i++) {
3748 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3749 break;
3750 DELAY(10);
3751 }
3752
3753 /* Fix up byte swapping. */
3754 CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
3755
3756 /* Tell the ASF firmware we are up */
3757 if (sc->bge_asf_mode & ASF_STACKUP)
3758 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3759
3760 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3761 DELAY(40);
3762
3763 /*
3764 * The 5704 in TBI mode apparently needs some special
3765	 * adjustment to ensure the SERDES drive level is set
3766 * to 1.2V.
3767 */
3768 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3769 sc->bge_flags & BGE_FLAG_TBI) {
3770 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3771 val = (val & ~0xFFF) | 0x880;
3772 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3773 }
3774
3775 /* XXX: Broadcom Linux driver. */
3776 if (sc->bge_flags & BGE_FLAG_PCIE &&
3777 !BGE_IS_5717_PLUS(sc) &&
3778 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3779 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3780 /* Enable Data FIFO protection. */
3781 val = CSR_READ_4(sc, 0x7C00);
3782 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3783 }
3784 DELAY(10000);
3785
3786 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
3787 BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
3788 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
3789
3790 return (0);
3791}
3792
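/*
 * Re-post the standard RX buffer at index i, reusing its existing mbuf
 * and DMA mapping. Used by bge_rxeof() when a frame had an error or a
 * replacement buffer could not be allocated.
 */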
3793static __inline void
3794bge_rxreuse_std(struct bge_softc *sc, int i)
3795{
3796 struct bge_rx_bd *r;
3797
3798 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3799 r->bge_flags = BGE_RXBDFLAG_END;
3800 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3801 r->bge_idx = i;
3802 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3803}
3804
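/*
 * Jumbo-ring counterpart of bge_rxreuse_std(); the extended BD carries
 * up to four segment lengths for the reused buffer.
 */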
3805static __inline void
3806bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3807{
3808 struct bge_extrx_bd *r;
3809
3810 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3811 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3812 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3813 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3814 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3815 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3816 r->bge_idx = i;
3817 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3818}
3819
3820/*
3821 * Frame reception handling. This is called if there's a frame
3822 * on the receive return list.
3823 *
3824 * Note: we have to be able to handle two possibilities here:
3825 * 1) the frame is from the jumbo receive ring
3826 * 2) the frame is from the standard receive ring
3827 */
3828
3829static int
3830bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3831{
3832 struct ifnet *ifp;
3833 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3834 uint16_t rx_cons;
3835
3836 rx_cons = sc->bge_rx_saved_considx;
3837
3838 /* Nothing to do. */
3839 if (rx_cons == rx_prod)
3840 return (rx_npkts);
3841
3842 ifp = sc->bge_ifp;
3843
3844 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3845 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3846 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3847 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3848 if (BGE_IS_JUMBO_CAPABLE(sc) &&
3849 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3850 (MCLBYTES - ETHER_ALIGN))
3851 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3852 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3853
3854 while (rx_cons != rx_prod) {
3855 struct bge_rx_bd *cur_rx;
3856 uint32_t rxidx;
3857 struct mbuf *m = NULL;
3858 uint16_t vlan_tag = 0;
3859 int have_tag = 0;
3860
3861#ifdef DEVICE_POLLING
3862 if (ifp->if_capenable & IFCAP_POLLING) {
3863 if (sc->rxcycles <= 0)
3864 break;
3865 sc->rxcycles--;
3866 }
3867#endif
3868
3869 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3870
3871 rxidx = cur_rx->bge_idx;
3872 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3873
3874 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3875 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3876 have_tag = 1;
3877 vlan_tag = cur_rx->bge_vlan_tag;
3878 }
3879
3880 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3881 jumbocnt++;
3882 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3883 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3884 bge_rxreuse_jumbo(sc, rxidx);
3885 continue;
3886 }
3887 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3888 bge_rxreuse_jumbo(sc, rxidx);
3889 ifp->if_iqdrops++;
3890 continue;
3891 }
3892 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3893 } else {
3894 stdcnt++;
3895 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3896 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3897 bge_rxreuse_std(sc, rxidx);
3898 continue;
3899 }
3900 if (bge_newbuf_std(sc, rxidx) != 0) {
3901 bge_rxreuse_std(sc, rxidx);
3902 ifp->if_iqdrops++;
3903 continue;
3904 }
3905 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3906 }
3907
3908 ifp->if_ipackets++;
3909#ifndef __NO_STRICT_ALIGNMENT
3910 /*
3911 * For architectures with strict alignment we must make sure
3912 * the payload is aligned.
3913 */
3914 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3915 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3916 cur_rx->bge_len);
3917 m->m_data += ETHER_ALIGN;
3918 }
3919#endif
3920 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3921 m->m_pkthdr.rcvif = ifp;
3922
3923 if (ifp->if_capenable & IFCAP_RXCSUM)
3924 bge_rxcsum(sc, cur_rx, m);
3925
3926 /*
3927 * If we received a packet with a vlan tag,
3928 * attach that information to the packet.
3929 */
3930 if (have_tag) {
3931 m->m_pkthdr.ether_vtag = vlan_tag;
3932 m->m_flags |= M_VLANTAG;
3933 }
3934
3935 if (holdlck != 0) {
3936 BGE_UNLOCK(sc);
3937 (*ifp->if_input)(ifp, m);
3938 BGE_LOCK(sc);
3939 } else
3940 (*ifp->if_input)(ifp, m);
3941 rx_npkts++;
3942
3943 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3944 return (rx_npkts);
3945 }
3946
3947 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3948 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3949 if (stdcnt > 0)
3950 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3951 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3952
3953 if (jumbocnt > 0)
3954 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3955 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3956
3957 sc->bge_rx_saved_considx = rx_cons;
3958 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3959 if (stdcnt)
3960 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3961 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3962 if (jumbocnt)
3963 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3964 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3965#ifdef notyet
3966 /*
3967 * This register wraps very quickly under heavy packet drops.
3968 * If you need correct statistics, you can enable this check.
3969 */
3970 if (BGE_IS_5705_PLUS(sc))
3971 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3972#endif
3973 return (rx_npkts);
3974}
3975
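/*
 * Convert the controller's RX checksum status into mbuf csum_flags.
 * 5717 and newer parts flag checksum errors explicitly; older parts
 * supply the computed IP checksum, which is 0xFFFF when the header is
 * valid.
 */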
3976static void
3977bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3978{
3979
3980 if (BGE_IS_5717_PLUS(sc)) {
3981 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3982 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3983 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3984 if ((cur_rx->bge_error_flag &
3985 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3986 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3987 }
3988 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3989 m->m_pkthdr.csum_data =
3990 cur_rx->bge_tcp_udp_csum;
3991 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3992 CSUM_PSEUDO_HDR;
3993 }
3994 }
3995 } else {
3996 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3997 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3998 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3999 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4000 }
4001 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
4002 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
4003 m->m_pkthdr.csum_data =
4004 cur_rx->bge_tcp_udp_csum;
4005 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
4006 CSUM_PSEUDO_HDR;
4007 }
4008 }
4009}
4010
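/*
 * TX completion: walk the ring from the saved consumer index up to the
 * index reported by the hardware, unloading DMA maps and freeing the
 * mbufs of transmitted frames.
 */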
4011static void
4012bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
4013{
4014 struct bge_tx_bd *cur_tx;
4015 struct ifnet *ifp;
4016
4017 BGE_LOCK_ASSERT(sc);
4018
4019 /* Nothing to do. */
4020 if (sc->bge_tx_saved_considx == tx_cons)
4021 return;
4022
4023 ifp = sc->bge_ifp;
4024
4025 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4026 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
4027 /*
4028 * Go through our tx ring and free mbufs for those
4029 * frames that have been sent.
4030 */
4031 while (sc->bge_tx_saved_considx != tx_cons) {
4032 uint32_t idx;
4033
4034 idx = sc->bge_tx_saved_considx;
4035 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
4036 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
4037 ifp->if_opackets++;
4038 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
4039 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
4040 sc->bge_cdata.bge_tx_dmamap[idx],
4041 BUS_DMASYNC_POSTWRITE);
4042 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
4043 sc->bge_cdata.bge_tx_dmamap[idx]);
4044 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
4045 sc->bge_cdata.bge_tx_chain[idx] = NULL;
4046 }
4047 sc->bge_txcnt--;
4048 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
4049 }
4050
4051 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4052 if (sc->bge_txcnt == 0)
4053 sc->bge_timer = 0;
4054}
4055
4056#ifdef DEVICE_POLLING
4057static int
4058bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
4059{
4060 struct bge_softc *sc = ifp->if_softc;
4061 uint16_t rx_prod, tx_cons;
4062 uint32_t statusword;
4063 int rx_npkts = 0;
4064
4065 BGE_LOCK(sc);
4066 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4067 BGE_UNLOCK(sc);
4068 return (rx_npkts);
4069 }
4070
4071 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4072 sc->bge_cdata.bge_status_map,
4073 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4074 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4075 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4076
4077 statusword = sc->bge_ldata.bge_status_block->bge_status;
4078 sc->bge_ldata.bge_status_block->bge_status = 0;
4079
4080 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4081 sc->bge_cdata.bge_status_map,
4082 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4083
4084 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
4085 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
4086 sc->bge_link_evt++;
4087
4088 if (cmd == POLL_AND_CHECK_STATUS)
4089 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4090 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4091 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
4092 bge_link_upd(sc);
4093
4094 sc->rxcycles = count;
4095 rx_npkts = bge_rxeof(sc, rx_prod, 1);
4096 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4097 BGE_UNLOCK(sc);
4098 return (rx_npkts);
4099 }
4100 bge_txeof(sc, tx_cons);
4101 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4102 bge_start_locked(ifp);
4103
4104 BGE_UNLOCK(sc);
4105 return (rx_npkts);
4106}
4107#endif /* DEVICE_POLLING */
4108
4109static int
4110bge_msi_intr(void *arg)
4111{
4112 struct bge_softc *sc;
4113
4114 sc = (struct bge_softc *)arg;
4115 /*
4116	 * This interrupt is not shared and the controller has already
4117	 * disabled further interrupts.
4118 */
4119 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
4120 return (FILTER_HANDLED);
4121}
4122
4123static void
4124bge_intr_task(void *arg, int pending)
4125{
4126 struct bge_softc *sc;
4127 struct ifnet *ifp;
4128 uint32_t status, status_tag;
4129 uint16_t rx_prod, tx_cons;
4130
4131 sc = (struct bge_softc *)arg;
4132 ifp = sc->bge_ifp;
4133
4134 BGE_LOCK(sc);
4135 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4136 BGE_UNLOCK(sc);
4137 return;
4138 }
4139
4140 /* Get updated status block. */
4141 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4142 sc->bge_cdata.bge_status_map,
4143 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4144
4145	/* Save producer/consumer indexes. */
4146 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4147 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4148 status = sc->bge_ldata.bge_status_block->bge_status;
4149 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
4150 sc->bge_ldata.bge_status_block->bge_status = 0;
4151 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4152 sc->bge_cdata.bge_status_map,
4153 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
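	/*
	 * With tagged status blocks, the ack written to BGE_MBX_IRQ0_LO
	 * below carries the tag of the status block we just consumed
	 * (shifted into bits 31:24), presumably so the controller can
	 * tell whether we have seen its latest update; without tagged
	 * status a plain 0 is written.
	 */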
4154 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
4155 status_tag = 0;
4156
4157 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
4158 bge_link_upd(sc);
4159
4160 /* Let controller work. */
4161 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
4162
4163 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4164 sc->bge_rx_saved_considx != rx_prod) {
4165 /* Check RX return ring producer/consumer. */
4166 BGE_UNLOCK(sc);
4167 bge_rxeof(sc, rx_prod, 0);
4168 BGE_LOCK(sc);
4169 }
4170 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4171 /* Check TX ring producer/consumer. */
4172 bge_txeof(sc, tx_cons);
4173 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4174 bge_start_locked(ifp);
4175 }
4176 BGE_UNLOCK(sc);
4177}
4178
4179static void
4180bge_intr(void *xsc)
4181{
4182 struct bge_softc *sc;
4183 struct ifnet *ifp;
4184 uint32_t statusword;
4185 uint16_t rx_prod, tx_cons;
4186
4187 sc = xsc;
4188
4189 BGE_LOCK(sc);
4190
4191 ifp = sc->bge_ifp;
4192
4193#ifdef DEVICE_POLLING
4194 if (ifp->if_capenable & IFCAP_POLLING) {
4195 BGE_UNLOCK(sc);
4196 return;
4197 }
4198#endif
4199
4200 /*
4201 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
4202 * disable interrupts by writing nonzero like we used to, since with
4203 * our current organization this just gives complications and
4204 * pessimizations for re-enabling interrupts. We used to have races
4205 * instead of the necessary complications. Disabling interrupts
4206 * would just reduce the chance of a status update while we are
4207 * running (by switching to the interrupt-mode coalescence
4208 * parameters), but this chance is already very low so it is more
4209 * efficient to get another interrupt than prevent it.
4210 *
4211 * We do the ack first to ensure another interrupt if there is a
4212 * status update after the ack. We don't check for the status
4213 * changing later because it is more efficient to get another
4214 * interrupt than prevent it, not quite as above (not checking is
4215 * a smaller optimization than not toggling the interrupt enable,
4216	 * since checking doesn't involve PCI accesses and toggling requires
4217 * the status check). So toggling would probably be a pessimization
4218 * even with MSI. It would only be needed for using a task queue.
4219 */
4220 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4221
4222 /*
4223 * Do the mandatory PCI flush as well as get the link status.
4224 */
4225 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4226
4227 /* Make sure the descriptor ring indexes are coherent. */
4228 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4229 sc->bge_cdata.bge_status_map,
4230 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4231 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4232 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4233 sc->bge_ldata.bge_status_block->bge_status = 0;
4234 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4235 sc->bge_cdata.bge_status_map,
4236 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4237
4238 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4239 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4240 statusword || sc->bge_link_evt)
4241 bge_link_upd(sc);
4242
4243 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4244 /* Check RX return ring producer/consumer. */
4245 bge_rxeof(sc, rx_prod, 1);
4246 }
4247
4248 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4249 /* Check TX ring producer/consumer. */
4250 bge_txeof(sc, tx_cons);
4251 }
4252
4253 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4254 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4255 bge_start_locked(ifp);
4256
4257 BGE_UNLOCK(sc);
4258}
4259
4260static void
4261bge_asf_driver_up(struct bge_softc *sc)
4262{
4263 if (sc->bge_asf_mode & ASF_STACKUP) {
4264		/* Send ASF heartbeat approx. every 2s */
4265 if (sc->bge_asf_count)
4266 sc->bge_asf_count --;
4267 else {
4268 sc->bge_asf_count = 2;
4269 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4270 BGE_FW_CMD_DRV_ALIVE);
4271 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4272 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4273 BGE_FW_HB_TIMEOUT_SEC);
4274 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4275 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4276 BGE_RX_CPU_DRV_EVENT);
4277 }
4278 }
4279}
4280
4281static void
4282bge_tick(void *xsc)
4283{
4284 struct bge_softc *sc = xsc;
4285 struct mii_data *mii = NULL;
4286
4287 BGE_LOCK_ASSERT(sc);
4288
4289 /* Synchronize with possible callout reset/stop. */
4290 if (callout_pending(&sc->bge_stat_ch) ||
4291 !callout_active(&sc->bge_stat_ch))
4292 return;
4293
4294 if (BGE_IS_5705_PLUS(sc))
4295 bge_stats_update_regs(sc);
4296 else
4297 bge_stats_update(sc);
4298
4299 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4300 mii = device_get_softc(sc->bge_miibus);
4301 /*
4302		 * Do not touch the PHY if we have link up. This could break
4303		 * IPMI/ASF mode or produce extra input errors
4304		 * (extra errors were reported for bcm5701 & bcm5704).
4305 */
4306 if (!sc->bge_link)
4307 mii_tick(mii);
4308 } else {
4309 /*
4310		 * Since auto-polling can't be used in TBI mode, we have to
4311		 * poll link status manually. Here we register a pending link
4312		 * event and trigger an interrupt.
4313 */
4314#ifdef DEVICE_POLLING
4315 /* In polling mode we poll link state in bge_poll(). */
4316 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4317#endif
4318 {
4319 sc->bge_link_evt++;
4320 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4321 sc->bge_flags & BGE_FLAG_5788)
4322 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4323 else
4324 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4325 }
4326 }
4327
4328 bge_asf_driver_up(sc);
4329 bge_watchdog(sc);
4330
4331 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4332}
4333
4334static void
4335bge_stats_update_regs(struct bge_softc *sc)
4336{
4337 struct ifnet *ifp;
4338 struct bge_mac_stats *stats;
4339
4340 ifp = sc->bge_ifp;
4341 stats = &sc->bge_mac_stats;
4342
4343 stats->ifHCOutOctets +=
4344 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4345 stats->etherStatsCollisions +=
4346 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4347 stats->outXonSent +=
4348 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4349 stats->outXoffSent +=
4350 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4351 stats->dot3StatsInternalMacTransmitErrors +=
4352 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4353 stats->dot3StatsSingleCollisionFrames +=
4354 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4355 stats->dot3StatsMultipleCollisionFrames +=
4356 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4357 stats->dot3StatsDeferredTransmissions +=
4358 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4359 stats->dot3StatsExcessiveCollisions +=
4360 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4361 stats->dot3StatsLateCollisions +=
4362 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4363 stats->ifHCOutUcastPkts +=
4364 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4365 stats->ifHCOutMulticastPkts +=
4366 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4367 stats->ifHCOutBroadcastPkts +=
4368 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4369
4370 stats->ifHCInOctets +=
4371 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4372 stats->etherStatsFragments +=
4373 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4374 stats->ifHCInUcastPkts +=
4375 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4376 stats->ifHCInMulticastPkts +=
4377 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4378 stats->ifHCInBroadcastPkts +=
4379 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4380 stats->dot3StatsFCSErrors +=
4381 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4382 stats->dot3StatsAlignmentErrors +=
4383 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4384 stats->xonPauseFramesReceived +=
4385 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4386 stats->xoffPauseFramesReceived +=
4387 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4388 stats->macControlFramesReceived +=
4389 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4390 stats->xoffStateEntered +=
4391 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4392 stats->dot3StatsFramesTooLong +=
4393 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4394 stats->etherStatsJabbers +=
4395 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4396 stats->etherStatsUndersizePkts +=
4397 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4398
4399 stats->FramesDroppedDueToFilters +=
4400 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4401 stats->DmaWriteQueueFull +=
4402 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4403 stats->DmaWriteHighPriQueueFull +=
4404 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4405 stats->NoMoreRxBDs +=
4406 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4407 /*
4408 * XXX
4409 * Unlike other controllers, BGE_RXLP_LOCSTAT_IFIN_DROPS
4410 * counter of BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0
4411	 * includes the number of unwanted multicast frames. This comes
4412	 * from a silicon bug, and the known workaround to get a rough (not
4413	 * exact) counter is to enable an interrupt on MBUF low water
4414 * attention. This can be accomplished by setting
4415 * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE,
4416 * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and
4417 * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL.
4418 * However that change would generate more interrupts and
4419 * there are still possibilities of losing multiple frames
4420 * during BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling.
4421	 * Given that the workaround still would not yield a correct
4422	 * counter, I don't think it's worth implementing. So
4423 * ignore reading the counter on controllers that have the
4424 * silicon bug.
4425 */
4426 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
4427 sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
4428 sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
4429 stats->InputDiscards +=
4430 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4431 stats->InputErrors +=
4432 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4433 stats->RecvThresholdHit +=
4434 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4435
4436 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4437 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4438 stats->InputErrors);
4439}
4440
4441static void
4442bge_stats_clear_regs(struct bge_softc *sc)
4443{
4444
4445 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4446 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4447 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4448 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4449 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4450 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4451 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4452 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4453 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4454 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4455 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4456 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4457 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4458
4459 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4460 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4461 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4462 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4463 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4464 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4465 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4466 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4467 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4468 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4469 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4470 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4471 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4472 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4473
4474 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4475 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4476 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4477 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4478 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4479 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4480 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4481}
4482
4483static void
4484bge_stats_update(struct bge_softc *sc)
4485{
4486 struct ifnet *ifp;
4487 bus_size_t stats;
4488 uint32_t cnt; /* current register value */
4489
4490 ifp = sc->bge_ifp;
4491
4492 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4493
4494#define READ_STAT(sc, stats, stat) \
4495 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
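	/*
	 * Example expansion (illustrative): READ_STAT(sc, stats,
	 * ifInErrors.bge_addr_lo) becomes a CSR_READ_4() of
	 * BGE_MEMWIN_START + BGE_STATS_BLOCK + offsetof(struct bge_stats,
	 * ifInErrors.bge_addr_lo), i.e. the low 32 bits of that counter
	 * in the NIC statistics block as seen through the memory window.
	 */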
4496
4497 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4498 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4499 sc->bge_tx_collisions = cnt;
4500
4501 cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
4502 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_nobds);
4503 sc->bge_rx_nobds = cnt;
4504 cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
4505 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_inerrs);
4506 sc->bge_rx_inerrs = cnt;
4507 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4508 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4509 sc->bge_rx_discards = cnt;
4510
4511 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4512 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4513 sc->bge_tx_discards = cnt;
4514
4515#undef READ_STAT
4516}
4517
4518/*
4519 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4520 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4521 * but when such padded frames employ the bge IP/TCP checksum offload,
4522 * the hardware checksum assist gives incorrect results (possibly
4523 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4524 * If we pad such runts with zeros, the onboard checksum comes out correct.
4525 */
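/*
 * Illustrative example: a TCP segment with no payload is 14 + 20 + 20 =
 * 54 bytes on the wire (sans CRC), 6 bytes short of ETHER_MIN_NOPAD
 * (60 on Ethernet), so six zero bytes are appended below before the
 * hardware computes the checksum.
 */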
4526static __inline int
4527bge_cksum_pad(struct mbuf *m)
4528{
4529 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4530 struct mbuf *last;
4531
4532 /* If there's only the packet-header and we can pad there, use it. */
4533 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4534 M_TRAILINGSPACE(m) >= padlen) {
4535 last = m;
4536 } else {
4537 /*
4538 * Walk packet chain to find last mbuf. We will either
4539 * pad there, or append a new mbuf and pad it.
4540 */
4541 for (last = m; last->m_next != NULL; last = last->m_next);
4542 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4543 /* Allocate new empty mbuf, pad it. Compact later. */
4544 struct mbuf *n;
4545
4546 MGET(n, M_DONTWAIT, MT_DATA);
4547 if (n == NULL)
4548 return (ENOBUFS);
4549 n->m_len = 0;
4550 last->m_next = n;
4551 last = n;
4552 }
4553 }
4554
4555 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4556 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4557 last->m_len += padlen;
4558 m->m_pkthdr.len += padlen;
4559
4560 return (0);
4561}
4562
4563static struct mbuf *
4564bge_check_short_dma(struct mbuf *m)
4565{
4566 struct mbuf *n;
4567 int found;
4568
4569 /*
4570	 * If the device receives two back-to-back send BDs with less than
4571	 * or equal to 8 total bytes then the device may hang. The two
4572	 * back-to-back send BDs must be in the same frame for this failure
4573	 * to occur. Scan mbuf chains and see whether two back-to-back
4574	 * send BDs are there. If this is the case, allocate a new mbuf
4575	 * and copy the frame to work around the silicon bug.
4576 */
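	/*
	 * Illustrative example: a chain with m_len values 4, 3 and 1500
	 * has two adjacent segments shorter than 8 bytes, so the loop
	 * below trips and the frame is m_defrag()'d into a single buffer.
	 */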
4577 for (n = m, found = 0; n != NULL; n = n->m_next) {
4578 if (n->m_len < 8) {
4579 found++;
4580 if (found > 1)
4581 break;
4582 continue;
4583 }
4584 found = 0;
4585 }
4586
4587 if (found > 1) {
4588 n = m_defrag(m, M_DONTWAIT);
4589 if (n == NULL)
4590 m_freem(m);
4591 } else
4592 n = m;
4593 return (n);
4594}
4595
4596static struct mbuf *
4597bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4598 uint16_t *flags)
4599{
4600 struct ip *ip;
4601 struct tcphdr *tcp;
4602 struct mbuf *n;
4603 uint16_t hlen;
4604 uint32_t poff;
4605
4606 if (M_WRITABLE(m) == 0) {
4607 /* Get a writable copy. */
4608 n = m_dup(m, M_DONTWAIT);
4609 m_freem(m);
4610 if (n == NULL)
4611 return (NULL);
4612 m = n;
4613 }
4614 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4615 if (m == NULL)
4616 return (NULL);
4617 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4618 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4619 m = m_pullup(m, poff + sizeof(struct tcphdr));
4620 if (m == NULL)
4621 return (NULL);
4622 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4623 m = m_pullup(m, poff + (tcp->th_off << 2));
4624 if (m == NULL)
4625 return (NULL);
4626 /*
4627	 * It seems the controller doesn't modify the IP length and TCP pseudo
4628	 * checksum. These checksums computed by the upper stack should be 0.
4629 */
4630 *mss = m->m_pkthdr.tso_segsz;
4631 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4632 ip->ip_sum = 0;
4633 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4634 /* Clear pseudo checksum computed by TCP stack. */
4635 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4636 tcp->th_sum = 0;
4637 /*
4638	 * Broadcom controllers use a different descriptor format for
4639	 * TSO depending on ASIC revision. Due to the TSO-capable firmware
4640	 * license issue and the lower performance of firmware-based TSO
4641	 * we only support hardware-based TSO.
4642 */
4643 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4644 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4645 if (sc->bge_flags & BGE_FLAG_TSO3) {
4646 /*
4647 * For BCM5717 and newer controllers, hardware based TSO
4648 * uses the 14 lower bits of the bge_mss field to store the
4649 * MSS and the upper 2 bits to store the lowest 2 bits of
4650 * the IP/TCP header length. The upper 6 bits of the header
4651 * length are stored in the bge_flags[14:10,4] field. Jumbo
4652 * frames are supported.
4653 */
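		/*
		 * Worked example (illustrative): a 20 byte IP header plus
		 * a 20 byte TCP header gives hlen = 40 >> 2 = 10 (0xa).
		 * The low two bits (0x2) land in bge_mss[15:14], i.e.
		 * *mss |= 0x8000, and the remaining bits land in bge_flags:
		 * (0xa & 0xF8) << 7 = 0x400 and (0xa & 0x4) << 2 = 0.
		 */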
4654 *mss |= ((hlen & 0x3) << 14);
4655 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
4656 } else {
4657 /*
4658 * For BCM5755 and newer controllers, hardware based TSO uses
4659 * the lower 11 bits to store the MSS and the upper 5 bits to
4660 * store the IP/TCP header length. Jumbo frames are not
4661 * supported.
4662 */
4663 *mss |= (hlen << 11);
4664 }
4665 return (m);
4666}
4667
4668/*
4669 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4670 * pointers to descriptors.
4671 */
4672static int
4673bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4674{
4675 bus_dma_segment_t segs[BGE_NSEG_NEW];
4676 bus_dmamap_t map;
4677 struct bge_tx_bd *d;
4678 struct mbuf *m = *m_head;
4679 uint32_t idx = *txidx;
4680 uint16_t csum_flags, mss, vlan_tag;
4681 int nsegs, i, error;
4682
4683 csum_flags = 0;
4684 mss = 0;
4685 vlan_tag = 0;
4686 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4687 m->m_next != NULL) {
4688 *m_head = bge_check_short_dma(m);
4689 if (*m_head == NULL)
4690 return (ENOBUFS);
4691 m = *m_head;
4692 }
4693 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4694 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4695 if (*m_head == NULL)
4696 return (ENOBUFS);
4697 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4698 BGE_TXBDFLAG_CPU_POST_DMA;
4699 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4700 if (m->m_pkthdr.csum_flags & CSUM_IP)
4701 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4702 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4703 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4704 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4705 (error = bge_cksum_pad(m)) != 0) {
4706 m_freem(m);
4707 *m_head = NULL;
4708 return (error);
4709 }
4710 }
4711 if (m->m_flags & M_LASTFRAG)
4712 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4713 else if (m->m_flags & M_FRAG)
4714 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4715 }
4716
4717 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4718 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4719 m->m_pkthdr.len > ETHER_MAX_LEN)
4720 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4721 if (sc->bge_forced_collapse > 0 &&
4722 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4723 /*
4724			 * Forcibly collapse mbuf chains to overcome a hardware
4725			 * limitation which only supports a single outstanding
4726 * DMA read operation.
4727 */
4728 if (sc->bge_forced_collapse == 1)
4729 m = m_defrag(m, M_DONTWAIT);
4730 else
4731 m = m_collapse(m, M_DONTWAIT,
4732 sc->bge_forced_collapse);
4733 if (m == NULL)
4734 m = *m_head;
4735 *m_head = m;
4736 }
4737 }
4738
4739 map = sc->bge_cdata.bge_tx_dmamap[idx];
4740 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4741 &nsegs, BUS_DMA_NOWAIT);
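	/*
	 * EFBIG here typically means the chain mapped to more than
	 * BGE_NSEG_NEW segments; collapse it to at most BGE_NSEG_NEW
	 * buffers and retry the load once before giving up.
	 */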
4742 if (error == EFBIG) {
4743 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4744 if (m == NULL) {
4745 m_freem(*m_head);
4746 *m_head = NULL;
4747 return (ENOBUFS);
4748 }
4749 *m_head = m;
4750 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4751 m, segs, &nsegs, BUS_DMA_NOWAIT);
4752 if (error) {
4753 m_freem(m);
4754 *m_head = NULL;
4755 return (error);
4756 }
4757 } else if (error != 0)
4758 return (error);
4759
4760 /* Check if we have enough free send BDs. */
4761 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4762 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4763 return (ENOBUFS);
4764 }
4765
4766 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4767
4768 if (m->m_flags & M_VLANTAG) {
4769 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4770 vlan_tag = m->m_pkthdr.ether_vtag;
4771 }
4772 for (i = 0; ; i++) {
4773 d = &sc->bge_ldata.bge_tx_ring[idx];
4774 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4775 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4776 d->bge_len = segs[i].ds_len;
4777 d->bge_flags = csum_flags;
4778 d->bge_vlan_tag = vlan_tag;
4779 d->bge_mss = mss;
4780 if (i == nsegs - 1)
4781 break;
4782 BGE_INC(idx, BGE_TX_RING_CNT);
4783 }
4784
4785 /* Mark the last segment as end of packet... */
4786 d->bge_flags |= BGE_TXBDFLAG_END;
4787
4788 /*
4789	 * Ensure that the map for this transmission
4790 * is placed at the array index of the last descriptor
4791 * in this chain.
4792 */
4793 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4794 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4795 sc->bge_cdata.bge_tx_chain[idx] = m;
4796 sc->bge_txcnt += nsegs;
4797
4798 BGE_INC(idx, BGE_TX_RING_CNT);
4799 *txidx = idx;
4800
4801 return (0);
4802}
4803
4804/*
4805 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4806 * to the mbuf data regions directly in the transmit descriptors.
4807 */
4808static void
4809bge_start_locked(struct ifnet *ifp)
4810{
4811 struct bge_softc *sc;
4812 struct mbuf *m_head;
4813 uint32_t prodidx;
4814 int count;
4815
4816 sc = ifp->if_softc;
4817 BGE_LOCK_ASSERT(sc);
4818
4819 if (!sc->bge_link ||
4820 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4821 IFF_DRV_RUNNING)
4822 return;
4823
4824 prodidx = sc->bge_tx_prodidx;
4825
4826 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
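		/*
		 * Leave at least 16 send BDs of headroom so a large
		 * request still fits; otherwise mark the queue OACTIVE
		 * and wait for bge_txeof() to drain the ring.
		 */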
4827 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4828 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4829 break;
4830 }
4831 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4832 if (m_head == NULL)
4833 break;
4834
4835 /*
4836 * XXX
4837 * The code inside the if() block is never reached since we
4838 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4839 * requests to checksum TCP/UDP in a fragmented packet.
4840 *
4841 * XXX
4842 * safety overkill. If this is a fragmented packet chain
4843 * with delayed TCP/UDP checksums, then only encapsulate
4844 * it if we have enough descriptors to handle the entire
4845 * chain at once.
4846 * (paranoia -- may not actually be needed)
4847 */
4848 if (m_head->m_flags & M_FIRSTFRAG &&
4849 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4850 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4851 m_head->m_pkthdr.csum_data + 16) {
4852 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4853 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4854 break;
4855 }
4856 }
4857
4858 /*
4859 * Pack the data into the transmit ring. If we
4860 * don't have room, set the OACTIVE flag and wait
4861 * for the NIC to drain the ring.
4862 */
4863 if (bge_encap(sc, &m_head, &prodidx)) {
4864 if (m_head == NULL)
4865 break;
4866 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4867 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4868 break;
4869 }
4870 ++count;
4871
4872 /*
4873 * If there's a BPF listener, bounce a copy of this frame
4874 * to him.
4875 */
4876#ifdef ETHER_BPF_MTAP
4877 ETHER_BPF_MTAP(ifp, m_head);
4878#else
4879 BPF_MTAP(ifp, m_head);
4880#endif
4881 }
4882
4883 if (count > 0) {
4884 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4885 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4886 /* Transmit. */
4887 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4888 /* 5700 b2 errata */
4889 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4890 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4891
4892 sc->bge_tx_prodidx = prodidx;
4893
4894 /*
4895 * Set a timeout in case the chip goes out to lunch.
4896 */
4897 sc->bge_timer = 5;
4898 }
4899}
4900
4901/*
4902 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4903 * to the mbuf data regions directly in the transmit descriptors.
4904 */
4905static void
4906bge_start(struct ifnet *ifp)
4907{
4908 struct bge_softc *sc;
4909
4910 sc = ifp->if_softc;
4911 BGE_LOCK(sc);
4912 bge_start_locked(ifp);
4913 BGE_UNLOCK(sc);
4914}
4915
4916static void
4917bge_init_locked(struct bge_softc *sc)
4918{
4919 struct ifnet *ifp;
4920 uint16_t *m;
4921 uint32_t mode;
4922
4923 BGE_LOCK_ASSERT(sc);
4924
4925 ifp = sc->bge_ifp;
4926
4927 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4928 return;
4929
4930 /* Cancel pending I/O and flush buffers. */
4931 bge_stop(sc);
4932
4933 bge_stop_fw(sc);
4934 bge_sig_pre_reset(sc, BGE_RESET_START);
4935 bge_reset(sc);
4936 bge_sig_legacy(sc, BGE_RESET_START);
4937 bge_sig_post_reset(sc, BGE_RESET_START);
4938
4939 bge_chipinit(sc);
4940
4941 /*
4942 * Init the various state machines, ring
4943 * control blocks and firmware.
4944 */
4945 if (bge_blockinit(sc)) {
4946 device_printf(sc->bge_dev, "initialization failure\n");
4947 return;
4948 }
4949
4950 ifp = sc->bge_ifp;
4951
4952 /* Specify MTU. */
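	/*
	 * For example, with the default 1500 byte MTU and IFCAP_VLAN_MTU
	 * enabled this programs 1500 + 14 + 4 + 4 = 1522 bytes (assuming
	 * the usual ETHER_HDR_LEN/ETHER_CRC_LEN/ETHER_VLAN_ENCAP_LEN
	 * values of 14/4/4); without VLAN_MTU it is 1518.
	 */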
4953 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4954 ETHER_HDR_LEN + ETHER_CRC_LEN +
4955 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4956
4957 /* Load our MAC address. */
4958 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4959 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4960 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4961
4962 /* Program promiscuous mode. */
4963 bge_setpromisc(sc);
4964
4965 /* Program multicast filter. */
4966 bge_setmulti(sc);
4967
4968 /* Program VLAN tag stripping. */
4969 bge_setvlan(sc);
4970
4971 /* Override UDP checksum offloading. */
4972 if (sc->bge_forced_udpcsum == 0)
4973 sc->bge_csum_features &= ~CSUM_UDP;
4974 else
4975 sc->bge_csum_features |= CSUM_UDP;
4976 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4977 ifp->if_capenable & IFCAP_TXCSUM) {
4978 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4979 ifp->if_hwassist |= sc->bge_csum_features;
4980 }
4981
4982 /* Init RX ring. */
4983 if (bge_init_rx_ring_std(sc) != 0) {
4984 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4985 bge_stop(sc);
4986 return;
4987 }
4988
4989 /*
4990 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4991	 * memory to ensure that the chip has in fact read the first
4992 * entry of the ring.
4993 */
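	/*
	 * The value polled for below is presumably the standard RX buffer
	 * length (MCLBYTES - ETHER_ALIGN, i.e. 2046 with 2k clusters) that
	 * bge_init_rx_ring_std() programmed into the first descriptor.
	 */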
4994 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4995 uint32_t v, i;
4996 for (i = 0; i < 10; i++) {
4997 DELAY(20);
4998 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4999 if (v == (MCLBYTES - ETHER_ALIGN))
5000 break;
5001 }
5002 if (i == 10)
5003 device_printf (sc->bge_dev,
5004 "5705 A0 chip failed to load RX ring\n");
5005 }
5006
5007 /* Init jumbo RX ring. */
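	/*
	 * For example: with 2k clusters the standard ring handles frames
	 * up to MCLBYTES - ETHER_ALIGN = 2046 bytes, so a 9000 byte MTU
	 * (9000 + 14 + 4 + 4 = 9022) needs the jumbo ring while the
	 * default 1500 byte MTU does not.
	 */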
5008 if (BGE_IS_JUMBO_CAPABLE(sc) &&
5009 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
5010 (MCLBYTES - ETHER_ALIGN)) {
5011 if (bge_init_rx_ring_jumbo(sc) != 0) {
5012 device_printf(sc->bge_dev,
5013 "no memory for jumbo Rx buffers.\n");
5014 bge_stop(sc);
5015 return;
5016 }
5017 }
5018
5019 /* Init our RX return ring index. */
5020 sc->bge_rx_saved_considx = 0;
5021
5022 /* Init our RX/TX stat counters. */
5023 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
5024
5025 /* Init TX ring. */
5026 bge_init_tx_ring(sc);
5027
5028 /* Enable TX MAC state machine lockup fix. */
5029 mode = CSR_READ_4(sc, BGE_TX_MODE);
5030 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
5031 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
5032 if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
5033 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5034 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
5035 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5036 }
5037 /* Turn on transmitter. */
5038 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
5039 DELAY(100);
5040
5041 /* Turn on receiver. */
5042 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5043 DELAY(10);
5044
5045 /*
5046 * Set the number of good frames to receive after RX MBUF
5047 * Low Watermark has been reached. After the RX MAC receives
5048 * this number of frames, it will drop subsequent incoming
5049 * frames until the MBUF High Watermark is reached.
5050 */
5051 if (sc->bge_asicrev == BGE_ASICREV_BCM57765)
5052 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
5053 else
5054 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
5055
5056 /* Clear MAC statistics. */
5057 if (BGE_IS_5705_PLUS(sc))
5058 bge_stats_clear_regs(sc);
5059
5060 /* Tell firmware we're alive. */
5061 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5062
5063#ifdef DEVICE_POLLING
5064 /* Disable interrupts if we are polling. */
5065 if (ifp->if_capenable & IFCAP_POLLING) {
5066 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5067 BGE_PCIMISCCTL_MASK_PCI_INTR);
5068 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5069 } else
5070#endif
5071
5072 /* Enable host interrupts. */
5073 {
5074 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
5075 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5076 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5077 }
5078
5079 ifp->if_drv_flags |= IFF_DRV_RUNNING;
5080 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5081
5082 bge_ifmedia_upd_locked(ifp);
5083
5084 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
5085}
5086
5087static void
5088bge_init(void *xsc)
5089{
5090 struct bge_softc *sc = xsc;
5091
5092 BGE_LOCK(sc);
5093 bge_init_locked(sc);
5094 BGE_UNLOCK(sc);
5095}
5096
5097/*
5098 * Set media options.
5099 */
5100static int
5101bge_ifmedia_upd(struct ifnet *ifp)
5102{
5103 struct bge_softc *sc = ifp->if_softc;
5104 int res;
5105
5106 BGE_LOCK(sc);
5107 res = bge_ifmedia_upd_locked(ifp);
5108 BGE_UNLOCK(sc);
5109
5110 return (res);
5111}
5112
5113static int
5114bge_ifmedia_upd_locked(struct ifnet *ifp)
5115{
5116 struct bge_softc *sc = ifp->if_softc;
5117 struct mii_data *mii;
5118 struct mii_softc *miisc;
5119 struct ifmedia *ifm;
5120
5121 BGE_LOCK_ASSERT(sc);
5122
5123 ifm = &sc->bge_ifmedia;
5124
5125 /* If this is a 1000baseX NIC, enable the TBI port. */
5126 if (sc->bge_flags & BGE_FLAG_TBI) {
5127 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5128 return (EINVAL);
5129 switch(IFM_SUBTYPE(ifm->ifm_media)) {
5130 case IFM_AUTO:
5131 /*
5132 * The BCM5704 ASIC appears to have a special
5133 * mechanism for programming the autoneg
5134 * advertisement registers in TBI mode.
5135 */
5136 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
5137 uint32_t sgdig;
5138 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
5139 if (sgdig & BGE_SGDIGSTS_DONE) {
5140 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
5141 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
5142 sgdig |= BGE_SGDIGCFG_AUTO |
5143 BGE_SGDIGCFG_PAUSE_CAP |
5144 BGE_SGDIGCFG_ASYM_PAUSE;
5145 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
5146 sgdig | BGE_SGDIGCFG_SEND);
5147 DELAY(5);
5148 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
5149 }
5150 }
5151 break;
5152 case IFM_1000_SX:
5153 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
5154 BGE_CLRBIT(sc, BGE_MAC_MODE,
5155 BGE_MACMODE_HALF_DUPLEX);
5156 } else {
5157 BGE_SETBIT(sc, BGE_MAC_MODE,
5158 BGE_MACMODE_HALF_DUPLEX);
5159 }
5160 DELAY(40);
5161 break;
5162 default:
5163 return (EINVAL);
5164 }
5165 return (0);
5166 }
5167
5168 sc->bge_link_evt++;
5169 mii = device_get_softc(sc->bge_miibus);
5170 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5171 PHY_RESET(miisc);
5172 mii_mediachg(mii);
5173
5174 /*
5175 * Force an interrupt so that we will call bge_link_upd
5176 * if needed and clear any pending link state attention.
5177	 * Without this we do not get any further interrupts for
5178	 * link state changes and thus will not UP the link and will
5179	 * not be able to send in bge_start_locked. The only
5180	 * way to get things working was to receive a packet and
5181	 * get an RX intr.
5182	 * bge_tick should help for fiber cards and we might not
5183	 * need to do this here if BGE_FLAG_TBI is set, but as
5184	 * we poll for fiber anyway it should not harm.
5185 */
5186 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
5187 sc->bge_flags & BGE_FLAG_5788)
5188 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
5189 else
5190 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
5191
5192 return (0);
5193}
5194
5195/*
5196 * Report current media status.
5197 */
5198static void
5199bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5200{
5201 struct bge_softc *sc = ifp->if_softc;
5202 struct mii_data *mii;
5203
5204 BGE_LOCK(sc);
5205
5206 if (sc->bge_flags & BGE_FLAG_TBI) {
5207 ifmr->ifm_status = IFM_AVALID;
5208 ifmr->ifm_active = IFM_ETHER;
5209 if (CSR_READ_4(sc, BGE_MAC_STS) &
5210 BGE_MACSTAT_TBI_PCS_SYNCHED)
5211 ifmr->ifm_status |= IFM_ACTIVE;
5212 else {
5213 ifmr->ifm_active |= IFM_NONE;
5214 BGE_UNLOCK(sc);
5215 return;
5216 }
5217 ifmr->ifm_active |= IFM_1000_SX;
5218 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5219 ifmr->ifm_active |= IFM_HDX;
5220 else
5221 ifmr->ifm_active |= IFM_FDX;
5222 BGE_UNLOCK(sc);
5223 return;
5224 }
5225
5226 mii = device_get_softc(sc->bge_miibus);
5227 mii_pollstat(mii);
5228 ifmr->ifm_active = mii->mii_media_active;
5229 ifmr->ifm_status = mii->mii_media_status;
5230
5231 BGE_UNLOCK(sc);
5232}
5233
5234static int
5235bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5236{
5237 struct bge_softc *sc = ifp->if_softc;
5238 struct ifreq *ifr = (struct ifreq *) data;
5239 struct mii_data *mii;
5240 int flags, mask, error = 0;
5241
5242 switch (command) {
5243 case SIOCSIFMTU:
5244 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5245 (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5246 if (ifr->ifr_mtu < ETHERMIN ||
5247 ifr->ifr_mtu > BGE_JUMBO_MTU) {
5248 error = EINVAL;
5249 break;
5250 }
5251 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5252 error = EINVAL;
5253 break;
5254 }
5255 BGE_LOCK(sc);
5256 if (ifp->if_mtu != ifr->ifr_mtu) {
5257 ifp->if_mtu = ifr->ifr_mtu;
5258 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5259 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5260 bge_init_locked(sc);
5261 }
5262 }
5263 BGE_UNLOCK(sc);
5264 break;
5265 case SIOCSIFFLAGS:
5266 BGE_LOCK(sc);
5267 if (ifp->if_flags & IFF_UP) {
5268 /*
5269 * If only the state of the PROMISC flag changed,
5270 * then just use the 'set promisc mode' command
5271 * instead of reinitializing the entire NIC. Doing
5272 * a full re-init means reloading the firmware and
5273 * waiting for it to start up, which may take a
5274 * second or two. Similarly for ALLMULTI.
5275 */
5276 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5277 flags = ifp->if_flags ^ sc->bge_if_flags;
5278 if (flags & IFF_PROMISC)
5279 bge_setpromisc(sc);
5280 if (flags & IFF_ALLMULTI)
5281 bge_setmulti(sc);
5282 } else
5283 bge_init_locked(sc);
5284 } else {
5285 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5286 bge_stop(sc);
5287 }
5288 }
5289 sc->bge_if_flags = ifp->if_flags;
5290 BGE_UNLOCK(sc);
5291 error = 0;
5292 break;
5293 case SIOCADDMULTI:
5294 case SIOCDELMULTI:
5295 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5296 BGE_LOCK(sc);
5297 bge_setmulti(sc);
5298 BGE_UNLOCK(sc);
5299 error = 0;
5300 }
5301 break;
5302 case SIOCSIFMEDIA:
5303 case SIOCGIFMEDIA:
5304 if (sc->bge_flags & BGE_FLAG_TBI) {
5305 error = ifmedia_ioctl(ifp, ifr,
5306 &sc->bge_ifmedia, command);
5307 } else {
5308 mii = device_get_softc(sc->bge_miibus);
5309 error = ifmedia_ioctl(ifp, ifr,
5310 &mii->mii_media, command);
5311 }
5312 break;
5313 case SIOCSIFCAP:
5314 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5315#ifdef DEVICE_POLLING
5316 if (mask & IFCAP_POLLING) {
5317 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5318 error = ether_poll_register(bge_poll, ifp);
5319 if (error)
5320 return (error);
5321 BGE_LOCK(sc);
5322 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5323 BGE_PCIMISCCTL_MASK_PCI_INTR);
5324 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5325 ifp->if_capenable |= IFCAP_POLLING;
5326 BGE_UNLOCK(sc);
5327 } else {
5328 error = ether_poll_deregister(ifp);
5329 /* Enable interrupt even in error case */
5330 BGE_LOCK(sc);
5331 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5332 BGE_PCIMISCCTL_MASK_PCI_INTR);
5333 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5334 ifp->if_capenable &= ~IFCAP_POLLING;
5335 BGE_UNLOCK(sc);
5336 }
5337 }
5338#endif
5339 if ((mask & IFCAP_TXCSUM) != 0 &&
5340 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5341 ifp->if_capenable ^= IFCAP_TXCSUM;
5342 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5343 ifp->if_hwassist |= sc->bge_csum_features;
5344 else
5345 ifp->if_hwassist &= ~sc->bge_csum_features;
5346 }
5347
5348 if ((mask & IFCAP_RXCSUM) != 0 &&
5349 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5350 ifp->if_capenable ^= IFCAP_RXCSUM;
5351
5352 if ((mask & IFCAP_TSO4) != 0 &&
5353 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5354 ifp->if_capenable ^= IFCAP_TSO4;
5355 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5356 ifp->if_hwassist |= CSUM_TSO;
5357 else
5358 ifp->if_hwassist &= ~CSUM_TSO;
5359 }
5360
5361 if (mask & IFCAP_VLAN_MTU) {
5362 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5363 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5364 bge_init(sc);
5365 }
5366
5367 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5368 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5369 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5370 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5371 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5372 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5373 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5374 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5375 BGE_LOCK(sc);
5376 bge_setvlan(sc);
5377 BGE_UNLOCK(sc);
5378 }
5379#ifdef VLAN_CAPABILITIES
5380 VLAN_CAPABILITIES(ifp);
5381#endif
5382 break;
5383 default:
5384 error = ether_ioctl(ifp, command, data);
5385 break;
5386 }
5387
5388 return (error);
5389}
5390
5391static void
5392bge_watchdog(struct bge_softc *sc)
5393{
5394 struct ifnet *ifp;
5395
5396 BGE_LOCK_ASSERT(sc);
5397
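	/*
	 * bge_timer is armed to 5 by bge_start_locked() and decremented
	 * here roughly once a second from bge_tick(); it only reaches
	 * zero if the chip stops completing transmissions (bge_txeof()
	 * clears it once the ring drains).
	 */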
5398 if (sc->bge_timer == 0 || --sc->bge_timer)
5399 return;
5400
5401 ifp = sc->bge_ifp;
5402
5403 if_printf(ifp, "watchdog timeout -- resetting\n");
5404
5405 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5406 bge_init_locked(sc);
5407
5408 ifp->if_oerrors++;
5409}
5410
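/*
 * Clear the given enable bit and wait (up to BGE_TIMEOUT polls of 100us
 * each) for the block to report that it has actually stopped.
 */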
5411static void
5412bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5413{
5414 int i;
5415
5416 BGE_CLRBIT(sc, reg, bit);
5417
5418 for (i = 0; i < BGE_TIMEOUT; i++) {
5419 if ((CSR_READ_4(sc, reg) & bit) == 0)
5420 return;
5421 DELAY(100);
5422 }
5423}
5424
5425/*
5426 * Stop the adapter and free any mbufs allocated to the
5427 * RX and TX lists.
5428 */
5429static void
5430bge_stop(struct bge_softc *sc)
5431{
5432 struct ifnet *ifp;
5433
5434 BGE_LOCK_ASSERT(sc);
5435
5436 ifp = sc->bge_ifp;
5437
5438 callout_stop(&sc->bge_stat_ch);
5439
5440 /* Disable host interrupts. */
5441 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5442 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5443
5444 /*
5445 * Tell firmware we're shutting down.
5446 */
5447 bge_stop_fw(sc);
5448 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5449
5450 /*
5451 * Disable all of the receiver blocks.
5452 */
5453 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5454 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5455 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5456 if (BGE_IS_5700_FAMILY(sc))
5457 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5458 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5459 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5460 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5461
5462 /*
5463 * Disable all of the transmit blocks.
5464 */
5465 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5466 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5467 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5468 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5469 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5470 if (BGE_IS_5700_FAMILY(sc))
5471 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5472 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5473
5474 /*
5475 * Shut down all of the memory managers and related
5476 * state machines.
5477 */
5478 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5479 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5480 if (BGE_IS_5700_FAMILY(sc))
5481 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5482
5483 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5484 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5485 if (!(BGE_IS_5705_PLUS(sc))) {
5486 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5487 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5488 }
5489 /* Update MAC statistics. */
5490 if (BGE_IS_5705_PLUS(sc))
5491 bge_stats_update_regs(sc);
5492
5493 bge_reset(sc);
5494 bge_sig_legacy(sc, BGE_RESET_STOP);
5495 bge_sig_post_reset(sc, BGE_RESET_STOP);
5496
5497 /*
5498 * Keep the ASF firmware running if up.
5499 */
5500 if (sc->bge_asf_mode & ASF_STACKUP)
5501 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5502 else
5503 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5504
5505 /* Free the RX lists. */
5506 bge_free_rx_ring_std(sc);
5507
5508 /* Free jumbo RX list. */
5509 if (BGE_IS_JUMBO_CAPABLE(sc))
5510 bge_free_rx_ring_jumbo(sc);
5511
5512 /* Free TX buffers. */
5513 bge_free_tx_ring(sc);
5514
5515 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5516
5517 /* Clear MAC's link state (PHY may still have link UP). */
5518 if (bootverbose && sc->bge_link)
5519 if_printf(sc->bge_ifp, "link DOWN\n");
5520 sc->bge_link = 0;
5521
5522 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5523}
5524
5525/*
5526 * Stop all chip I/O so that the kernel's probe routines don't
5527 * get confused by errant DMAs when rebooting.
5528 */
5529static int
5530bge_shutdown(device_t dev)
5531{
5532 struct bge_softc *sc;
5533
5534 sc = device_get_softc(dev);
5535 BGE_LOCK(sc);
5536 bge_stop(sc);
5537 bge_reset(sc);
5538 BGE_UNLOCK(sc);
5539
5540 return (0);
5541}
5542
5543static int
5544bge_suspend(device_t dev)
5545{
5546 struct bge_softc *sc;
5547
5548 sc = device_get_softc(dev);
5549 BGE_LOCK(sc);
5550 bge_stop(sc);
5551 BGE_UNLOCK(sc);
5552
5553 return (0);
5554}
5555
5556static int
5557bge_resume(device_t dev)
5558{
5559 struct bge_softc *sc;
5560 struct ifnet *ifp;
5561
5562 sc = device_get_softc(dev);
5563 BGE_LOCK(sc);
5564 ifp = sc->bge_ifp;
5565 if (ifp->if_flags & IFF_UP) {
5566 bge_init_locked(sc);
5567 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5568 bge_start_locked(ifp);
5569 }
5570 BGE_UNLOCK(sc);
5571
5572 return (0);
5573}
5574
5575static void
5576bge_link_upd(struct bge_softc *sc)
5577{
5578 struct mii_data *mii;
5579 uint32_t link, status;
5580
5581 BGE_LOCK_ASSERT(sc);
5582
5583 /* Clear 'pending link event' flag. */
5584 sc->bge_link_evt = 0;
5585
5586 /*
5587 * Process link state changes.
5588 * Grrr. The link status word in the status block does
5589 * not work correctly on the BCM5700 rev AX and BX chips,
5590 * according to all available information. Hence, we have
5591 * to enable MII interrupts in order to properly obtain
5592 * async link changes. Unfortunately, this also means that
5593 * we have to read the MAC status register to detect link
5594 * changes, thereby adding an additional register access to
5595 * the interrupt handler.
5596 *
5597	 * XXX: perhaps the link state detection procedure used for
5598	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5599 */
5600
5601 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5602 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5603 status = CSR_READ_4(sc, BGE_MAC_STS);
5604 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5605 mii = device_get_softc(sc->bge_miibus);
5606 mii_pollstat(mii);
5607 if (!sc->bge_link &&
5608 mii->mii_media_status & IFM_ACTIVE &&
5609 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5610 sc->bge_link++;
5611 if (bootverbose)
5612 if_printf(sc->bge_ifp, "link UP\n");
5613 } else if (sc->bge_link &&
5614 (!(mii->mii_media_status & IFM_ACTIVE) ||
5615 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5616 sc->bge_link = 0;
5617 if (bootverbose)
5618 if_printf(sc->bge_ifp, "link DOWN\n");
5619 }
5620
5621 /* Clear the interrupt. */
5622 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5623 BGE_EVTENB_MI_INTERRUPT);
5624 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5625 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5626 BRGPHY_INTRS);
5627 }
5628 return;
5629 }
5630
5631 if (sc->bge_flags & BGE_FLAG_TBI) {
5632 status = CSR_READ_4(sc, BGE_MAC_STS);
5633 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5634 if (!sc->bge_link) {
5635 sc->bge_link++;
5636 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
5637 BGE_CLRBIT(sc, BGE_MAC_MODE,
5638 BGE_MACMODE_TBI_SEND_CFGS);
5639 DELAY(40);
5640 }
5641 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5642 if (bootverbose)
5643 if_printf(sc->bge_ifp, "link UP\n");
5644 if_link_state_change(sc->bge_ifp,
5645 LINK_STATE_UP);
5646 }
5647 } else if (sc->bge_link) {
5648 sc->bge_link = 0;
5649 if (bootverbose)
5650 if_printf(sc->bge_ifp, "link DOWN\n");
5651 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5652 }
5653 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5654 /*
5655		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
5656		 * bit in the status word always set. Work around this bug by
5657		 * reading the PHY link status directly.
5658 */
5659 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5660
5661 if (link != sc->bge_link ||
5662 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5663 mii = device_get_softc(sc->bge_miibus);
5664 mii_pollstat(mii);
5665 if (!sc->bge_link &&
5666 mii->mii_media_status & IFM_ACTIVE &&
5667 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5668 sc->bge_link++;
5669 if (bootverbose)
5670 if_printf(sc->bge_ifp, "link UP\n");
5671 } else if (sc->bge_link &&
5672 (!(mii->mii_media_status & IFM_ACTIVE) ||
5673 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5674 sc->bge_link = 0;
5675 if (bootverbose)
5676 if_printf(sc->bge_ifp, "link DOWN\n");
5677 }
5678 }
5679 } else {
5680 /*
5681 * For controllers that call mii_tick, we have to poll
5682 * link status.
5683 */
5684 mii = device_get_softc(sc->bge_miibus);
5685 mii_pollstat(mii);
5686 bge_miibus_statchg(sc->bge_dev);
5687 }
5688
5689 /* Clear the attention. */
5690 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5691 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5692 BGE_MACSTAT_LINK_CHANGED);
5693}
5694
5695static void
5696bge_add_sysctls(struct bge_softc *sc)
5697{
5698 struct sysctl_ctx_list *ctx;
5699 struct sysctl_oid_list *children;
5700 char tn[32];
5701 int unit;
5702
5703 ctx = device_get_sysctl_ctx(sc->bge_dev);
5704 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5705
5706#ifdef BGE_REGISTER_DEBUG
5707 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5708 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5709 "Debug Information");
5710
5711 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5712 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5713 "Register Read");
5714
5715 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5716 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5717 "Memory Read");
5718
5719#endif
5720
5721 unit = device_get_unit(sc->bge_dev);
5722 /*
5723 * A common design characteristic for many Broadcom client controllers
5724 * is that they only support a single outstanding DMA read operation
5725 * on the PCIe bus. This means that it will take twice as long to fetch
5726 * a TX frame that is split into header and payload buffers as it does
5727 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5728 * these controllers, coalescing buffers to reduce the number of memory
5729	 * reads is an effective way to get maximum performance (about 940Mbps).
5730	 * Without collapsing TX buffers the maximum TCP bulk transfer
5731	 * performance is about 850Mbps. However, forcing the coalescing of mbufs
5732 * consumes a lot of CPU cycles, so leave it off by default.
5733 */
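	/*
	 * Usage example: setting dev.bge.N.forced_collapse=1 makes
	 * bge_encap() m_defrag() the whole chain into one buffer, while a
	 * value greater than 1 uses m_collapse() to reduce the chain to at
	 * most that many buffers.
	 */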
5734 sc->bge_forced_collapse = 0;
5735 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5736 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5737 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5738 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5739 "Number of fragmented TX buffers of a frame allowed before "
5740 "forced collapsing");
5741
5742 sc->bge_msi = 1;
5743 snprintf(tn, sizeof(tn), "dev.bge.%d.msi", unit);
5744 TUNABLE_INT_FETCH(tn, &sc->bge_msi);
5745 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi",
5746 CTLFLAG_RD, &sc->bge_msi, 0, "Enable MSI");
5747
5748 /*
5749 * It seems all Broadcom controllers have a bug that can generate UDP
5750 * datagrams with checksum value 0 when TX UDP checksum offloading is
5751	 * enabled. Generating a UDP checksum value of 0 is an RFC 768 violation.
5752	 * Even though the probability of generating such UDP datagrams is
5753	 * low, I don't want to see FreeBSD boxes injecting such datagrams
5754	 * into the network, so disable UDP checksum offloading by default.
5755	 * Users can still override this behavior by setting a sysctl variable,
5756 * dev.bge.0.forced_udpcsum.
5757 */
5758 sc->bge_forced_udpcsum = 0;
5759 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5760 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5761 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5762 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5763 "Enable UDP checksum offloading even if controller can "
5764 "generate UDP checksum value 0");
5765
5766 if (BGE_IS_5705_PLUS(sc))
5767 bge_add_sysctl_stats_regs(sc, ctx, children);
5768 else
5769 bge_add_sysctl_stats(sc, ctx, children);
5770}
5771
5772#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5773 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5774 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5775 desc)
5776
5777static void
5778bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5779 struct sysctl_oid_list *parent)
5780{
5781 struct sysctl_oid *tree;
5782 struct sysctl_oid_list *children, *schildren;
5783
5784 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5785 NULL, "BGE Statistics");
5786 schildren = children = SYSCTL_CHILDREN(tree);
5787 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5788 children, COSFramesDroppedDueToFilters,
5789 "FramesDroppedDueToFilters");
5790 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5791 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5792 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5793 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5794 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5795 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5796 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5797 children, ifInDiscards, "InputDiscards");
5798 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5799 children, ifInErrors, "InputErrors");
5800 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5801 children, nicRecvThresholdHit, "RecvThresholdHit");
5802 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5803 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5804 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5805 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5806 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5807 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5808 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5809 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5810 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5811 children, nicRingStatusUpdate, "RingStatusUpdate");
5812 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5813 children, nicInterrupts, "Interrupts");
5814 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5815 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5816 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5817 children, nicSendThresholdHit, "SendThresholdHit");
5818
5819 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5820 NULL, "BGE RX Statistics");
5821 children = SYSCTL_CHILDREN(tree);
5822 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5823 children, rxstats.ifHCInOctets, "ifHCInOctets");
5824 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5825 children, rxstats.etherStatsFragments, "Fragments");
5826 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5827 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5828 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5829 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5830 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5831 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5832 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5833 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5834 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5835 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5836 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5837 children, rxstats.xoffPauseFramesReceived,
5838 "xoffPauseFramesReceived");
5839 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5840 children, rxstats.macControlFramesReceived,
5841 "ControlFramesReceived");
5842 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5843 children, rxstats.xoffStateEntered, "xoffStateEntered");
5844 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5845 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5846 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5847 children, rxstats.etherStatsJabbers, "Jabbers");
5848 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5849 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5850 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5851 children, rxstats.inRangeLengthError, "inRangeLengthError");
5852 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5853 children, rxstats.outRangeLengthError, "outRangeLengthError");
5854
5855 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5856 NULL, "BGE TX Statistics");
5857 children = SYSCTL_CHILDREN(tree);
5858 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5859 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5860 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5861 children, txstats.etherStatsCollisions, "Collisions");
5862 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5863 children, txstats.outXonSent, "XonSent");
5864 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5865 children, txstats.outXoffSent, "XoffSent");
5866 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5867 children, txstats.flowControlDone, "flowControlDone");
5868	BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX Errors",
5869 children, txstats.dot3StatsInternalMacTransmitErrors,
5870 "InternalMacTransmitErrors");
5871 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5872 children, txstats.dot3StatsSingleCollisionFrames,
5873 "SingleCollisionFrames");
5874 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5875 children, txstats.dot3StatsMultipleCollisionFrames,
5876 "MultipleCollisionFrames");
5877 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5878 children, txstats.dot3StatsDeferredTransmissions,
5879 "DeferredTransmissions");
5880 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5881 children, txstats.dot3StatsExcessiveCollisions,
5882 "ExcessiveCollisions");
5883 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5884 children, txstats.dot3StatsLateCollisions,
5885 "LateCollisions");
5886 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5887 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5888 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5889 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5890 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5891 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5892 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5893 children, txstats.dot3StatsCarrierSenseErrors,
5894 "CarrierSenseErrors");
5895 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5896 children, txstats.ifOutDiscards, "Discards");
5897 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5898 children, txstats.ifOutErrors, "Errors");
5899}
5900
5901#undef BGE_SYSCTL_STAT
5902
5903#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5904 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5905
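/*
 * Create the sysctl tree for the statistics the driver accumulates in
 * sc->bge_mac_stats (updated elsewhere in the driver from the MAC
 * statistics registers).  Each counter is exported as a read-only 64-bit
 * node and typically appears under the device's sysctl tree, e.g.
 * "sysctl dev.bge.0.stats.rx.ifHCInOctets".
 */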
5906static void
5907bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5908 struct sysctl_oid_list *parent)
5909{
5910 struct sysctl_oid *tree;
5911 struct sysctl_oid_list *child, *schild;
5912 struct bge_mac_stats *stats;
5913
5914 stats = &sc->bge_mac_stats;
5915 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5916 NULL, "BGE Statistics");
5917 schild = child = SYSCTL_CHILDREN(tree);
5918 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5919	    &stats->FramesDroppedDueToFilters, "Frames Dropped Due To Filters");
5920 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5921 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5922 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5923 &stats->DmaWriteHighPriQueueFull,
5924 "NIC DMA Write High Priority Queue Full");
5925 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5926 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5927 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5928 &stats->InputDiscards, "Discarded Input Frames");
5929 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5930 &stats->InputErrors, "Input Errors");
5931 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5932 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5933
5934 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5935 NULL, "BGE RX Statistics");
5936 child = SYSCTL_CHILDREN(tree);
5937 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5938 &stats->ifHCInOctets, "Inbound Octets");
5939 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5940 &stats->etherStatsFragments, "Fragments");
5941 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5942 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5943 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5944 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5945 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5946 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5947 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5948 &stats->dot3StatsFCSErrors, "FCS Errors");
5949 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5950 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5951 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5952 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5953 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5954 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5955 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5956 &stats->macControlFramesReceived, "MAC Control Frames Received");
5957 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5958 &stats->xoffStateEntered, "XOFF State Entered");
5959 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5960 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5961 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5962 &stats->etherStatsJabbers, "Jabbers");
5963 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5964 &stats->etherStatsUndersizePkts, "Undersized Packets");
5965
5966 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5967 NULL, "BGE TX Statistics");
5968 child = SYSCTL_CHILDREN(tree);
5969 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5970 &stats->ifHCOutOctets, "Outbound Octets");
5971 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5972 &stats->etherStatsCollisions, "TX Collisions");
5973 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5974 &stats->outXonSent, "XON Sent");
5975 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5976 &stats->outXoffSent, "XOFF Sent");
5977 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5978 &stats->dot3StatsInternalMacTransmitErrors,
5979 "Internal MAC TX Errors");
5980 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5981 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5982 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5983 &stats->dot3StatsMultipleCollisionFrames,
5984 "Multiple Collision Frames");
5985 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5986 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5987 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5988 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5989 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5990 &stats->dot3StatsLateCollisions, "Late Collisions");
5991 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5992 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5993 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5994 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5995 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5996 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5997}
5998
5999#undef BGE_SYSCTL_STAT_ADD64
6000
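/*
 * Handler behind the nodes created by bge_add_sysctl_stats().  arg2 is the
 * offset of the requested counter within the NIC statistics block; the
 * handler returns that 64-bit counter's low 32 bits, read through the PCI
 * memory window.
 */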
6001static int
6002bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
6003{
6004 struct bge_softc *sc;
6005 uint32_t result;
6006 int offset;
6007
6008 sc = (struct bge_softc *)arg1;
6009 offset = arg2;
6010 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
6011 offsetof(bge_hostaddr, bge_addr_lo));
6012 return (sysctl_handle_int(oidp, &result, 0, req));
6013}
6014
6015#ifdef BGE_REGISTER_DEBUG
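/*
 * Debug handler: writing 1 to the node dumps the status block, the
 * register range 0x800-0x9ff and the hardware capability flags to the
 * console.
 */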
6016static int
6017bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
6018{
6019 struct bge_softc *sc;
6020 uint16_t *sbdata;
6021 int error, result, sbsz;
6022 int i, j;
6023
6024 result = -1;
6025 error = sysctl_handle_int(oidp, &result, 0, req);
6026 if (error || (req->newptr == NULL))
6027 return (error);
6028
6029 if (result == 1) {
6030 sc = (struct bge_softc *)arg1;
6031
6032 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
6033 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
6034 sbsz = BGE_STATUS_BLK_SZ;
6035 else
6036 sbsz = 32;
6037 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
6038 printf("Status Block:\n");
6039 BGE_LOCK(sc);
6040 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
6041 sc->bge_cdata.bge_status_map,
6042 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
6043 for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
6044 printf("%06x:", i);
6045 for (j = 0; j < 8; j++)
6046 printf(" %04x", sbdata[i++]);
6047 printf("\n");
6048 }
6049
6050 printf("Registers:\n");
6051 for (i = 0x800; i < 0xA00; ) {
6052 printf("%06x:", i);
6053 for (j = 0; j < 8; j++) {
6054 printf(" %08x", CSR_READ_4(sc, i));
6055 i += 4;
6056 }
6057 printf("\n");
6058 }
6059 BGE_UNLOCK(sc);
6060
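		/* Decode the capability and workaround flags set at attach time. */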
6061 printf("Hardware Flags:\n");
6062 if (BGE_IS_5717_PLUS(sc))
6063 printf(" - 5717 Plus\n");
6064 if (BGE_IS_5755_PLUS(sc))
6065 printf(" - 5755 Plus\n");
6066 if (BGE_IS_575X_PLUS(sc))
6067 printf(" - 575X Plus\n");
6068 if (BGE_IS_5705_PLUS(sc))
6069 printf(" - 5705 Plus\n");
6070 if (BGE_IS_5714_FAMILY(sc))
6071 printf(" - 5714 Family\n");
6072 if (BGE_IS_5700_FAMILY(sc))
6073 printf(" - 5700 Family\n");
6074 if (sc->bge_flags & BGE_FLAG_JUMBO)
6075 printf(" - Supports Jumbo Frames\n");
6076 if (sc->bge_flags & BGE_FLAG_PCIX)
6077 printf(" - PCI-X Bus\n");
6078 if (sc->bge_flags & BGE_FLAG_PCIE)
6079 printf(" - PCI Express Bus\n");
6080 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
6081 printf(" - No 3 LEDs\n");
6082 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
6083 printf(" - RX Alignment Bug\n");
6084 }
6085
6086 return (error);
6087}
6088
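/*
 * Debug handler: writing a register offset (below 0x8000) reads that
 * register with CSR_READ_4() and prints the result to the console.
 */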
6089static int
6090bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
6091{
6092 struct bge_softc *sc;
6093 int error;
6094	int result;
6095 uint32_t val;
6096
6097 result = -1;
6098 error = sysctl_handle_int(oidp, &result, 0, req);
6099 if (error || (req->newptr == NULL))
6100 return (error);
6101
6102	if (result >= 0 && result < 0x8000) {
6103 sc = (struct bge_softc *)arg1;
6104 val = CSR_READ_4(sc, result);
6105 printf("reg 0x%06X = 0x%08X\n", result, val);
6106 }
6107
6108 return (error);
6109}
6110
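/*
 * Debug handler: writing an offset (below 0x8000) reads that word of NIC
 * internal memory with bge_readmem_ind() and prints the result to the
 * console.
 */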
6111static int
6112bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
6113{
6114 struct bge_softc *sc;
6115 int error;
6116	int result;
6117 uint32_t val;
6118
6119 result = -1;
6120 error = sysctl_handle_int(oidp, &result, 0, req);
6121 if (error || (req->newptr == NULL))
6122 return (error);
6123
6124	if (result >= 0 && result < 0x8000) {
6125 sc = (struct bge_softc *)arg1;
6126 val = bge_readmem_ind(sc, result);
6127 printf("mem 0x%06X = 0x%08X\n", result, val);
6128 }
6129
6130 return (error);
6131}
6132#endif
6133
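/*
 * Try to obtain the station address from the system firmware.  This is
 * only implemented for sparc64, where the address comes from Open
 * Firmware; when BGE_FLAG_EADDR is set the lookup is skipped so that the
 * address stored on the card is used instead.  Returns 0 on success,
 * nonzero to let bge_get_eaddr() try the next source.
 */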
6134static int
6135bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
6136{
6137
6138 if (sc->bge_flags & BGE_FLAG_EADDR)
6139 return (1);
6140
6141#ifdef __sparc64__
6142 OF_getetheraddr(sc->bge_dev, ether_addr);
6143 return (0);
6144#endif
6145 return (1);
6146}
6147
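/*
 * Try to read the station address that the bootcode leaves in NIC SRAM.
 * A value of 0x484b (ASCII "HK") in the upper 16 bits of the high word
 * marks a valid address.
 */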
6148static int
6149bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
6150{
6151 uint32_t mac_addr;
6152
6153 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
6154 if ((mac_addr >> 16) == 0x484b) {
6155 ether_addr[0] = (uint8_t)(mac_addr >> 8);
6156 ether_addr[1] = (uint8_t)mac_addr;
6157 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
6158 ether_addr[2] = (uint8_t)(mac_addr >> 24);
6159 ether_addr[3] = (uint8_t)(mac_addr >> 16);
6160 ether_addr[4] = (uint8_t)(mac_addr >> 8);
6161 ether_addr[5] = (uint8_t)mac_addr;
6162 return (0);
6163 }
6164 return (1);
6165}
6166
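/*
 * Try to read the station address from NVRAM; the BCM5906 stores it at a
 * different offset.
 */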
6167static int
6168bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
6169{
6170 int mac_offset = BGE_EE_MAC_OFFSET;
6171
6172 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6173 mac_offset = BGE_EE_MAC_OFFSET_5906;
6174
6175 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
6176 ETHER_ADDR_LEN));
6177}
6178
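/*
 * Try to read the station address from the EEPROM.  Not used on the
 * BCM5906, which relies on the NVRAM method instead.
 */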
6179static int
6180bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
6181{
6182
6183 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6184 return (1);
6185
6186 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
6187 ETHER_ADDR_LEN));
6188}
6189
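/*
 * Determine the station address by trying each source in
 * bge_eaddr_funcs[] in order until one succeeds; return ENXIO if none
 * does.
 */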
6190static int
6191bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
6192{
6193 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
6194 /* NOTE: Order is critical */
6195 bge_get_eaddr_fw,
6196 bge_get_eaddr_mem,
6197 bge_get_eaddr_nvram,
6198 bge_get_eaddr_eeprom,
6199 NULL
6200 };
6201 const bge_eaddr_fcn_t *func;
6202
6203 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6204 if ((*func)(sc, eaddr) == 0)
6205 break;
6206 }
6207 return (*func == NULL ? ENXIO : 0);
6208}