if_bge.c (r214428) → if_bge.c (r215297)
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 214428 2010-10-27 17:20:19Z yongari $");
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 215297 2010-11-14 13:26:10Z marius $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
 62 * Without external SSRAM, you can have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
 136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
220
221 { SK_VENDORID, SK_DEVICEID_ALTIMA },
222
223 { TC_VENDORID, TC_DEVICEID_3C996 },
224
225 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
226 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
227 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
228
229 { 0, 0 }
230};
231
232static const struct bge_vendor {
233 uint16_t v_id;
234 const char *v_name;
235} bge_vendors[] = {
236 { ALTEON_VENDORID, "Alteon" },
237 { ALTIMA_VENDORID, "Altima" },
238 { APPLE_VENDORID, "Apple" },
239 { BCOM_VENDORID, "Broadcom" },
240 { SK_VENDORID, "SysKonnect" },
241 { TC_VENDORID, "3Com" },
242 { FJTSU_VENDORID, "Fujitsu" },
243
244 { 0, NULL }
245};
246
247static const struct bge_revision {
248 uint32_t br_chipid;
249 const char *br_name;
250} bge_revisions[] = {
251 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
252 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
253 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
254 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
255 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
256 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
257 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
258 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
259 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
260 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
261 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
262 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
263 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
264 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
265 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
266 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
267 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
268 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
269 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
270 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
271 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
272 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
273 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
274 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
275 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
276 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
277 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
278 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
279 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
280 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
281 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
282 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
283 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
284 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
285 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
286 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
287 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
288 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
289 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
290 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
291 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
292 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
293 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
294 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
295 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
296 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
297 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
298 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
299 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
300 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
301 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
302 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
303 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
304 /* 5754 and 5787 share the same ASIC ID */
305 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
306 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
307 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
308 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
309 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
310 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
311 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
312
313 { 0, NULL }
314};
315
316/*
317 * Some defaults for major revisions, so that newer steppings
318 * that we don't know about have a shot at working.
319 */
320static const struct bge_revision bge_majorrevs[] = {
321 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
322 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
323 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
324 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
325 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
326 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
327 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
328 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
329 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
330 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
331 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
332 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
333 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
334 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
335 /* 5754 and 5787 share the same ASIC ID */
336 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
337 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
338 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
339 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
340
341 { 0, NULL }
342};
343
344#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
345#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
346#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
347#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
348#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
349#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
350#define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
351
352const struct bge_revision * bge_lookup_rev(uint32_t);
353const struct bge_vendor * bge_lookup_vendor(uint16_t);
354
355typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
356
357static int bge_probe(device_t);
358static int bge_attach(device_t);
359static int bge_detach(device_t);
360static int bge_suspend(device_t);
361static int bge_resume(device_t);
362static void bge_release_resources(struct bge_softc *);
363static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
364static int bge_dma_alloc(struct bge_softc *);
365static void bge_dma_free(struct bge_softc *);
366static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
367 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
368
369static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
370static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
371static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
372static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
373static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
374
375static void bge_txeof(struct bge_softc *, uint16_t);
376static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
377static int bge_rxeof(struct bge_softc *, uint16_t, int);
378
379static void bge_asf_driver_up (struct bge_softc *);
380static void bge_tick(void *);
381static void bge_stats_clear_regs(struct bge_softc *);
382static void bge_stats_update(struct bge_softc *);
383static void bge_stats_update_regs(struct bge_softc *);
384static struct mbuf *bge_check_short_dma(struct mbuf *);
385static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
386 uint16_t *, uint16_t *);
387static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
388
389static void bge_intr(void *);
390static int bge_msi_intr(void *);
391static void bge_intr_task(void *, int);
392static void bge_start_locked(struct ifnet *);
393static void bge_start(struct ifnet *);
394static int bge_ioctl(struct ifnet *, u_long, caddr_t);
395static void bge_init_locked(struct bge_softc *);
396static void bge_init(void *);
397static void bge_stop(struct bge_softc *);
398static void bge_watchdog(struct bge_softc *);
399static int bge_shutdown(device_t);
400static int bge_ifmedia_upd_locked(struct ifnet *);
401static int bge_ifmedia_upd(struct ifnet *);
402static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
403
404static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
405static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
406
407static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
408static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
409
410static void bge_setpromisc(struct bge_softc *);
411static void bge_setmulti(struct bge_softc *);
412static void bge_setvlan(struct bge_softc *);
413
414static __inline void bge_rxreuse_std(struct bge_softc *, int);
415static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
416static int bge_newbuf_std(struct bge_softc *, int);
417static int bge_newbuf_jumbo(struct bge_softc *, int);
418static int bge_init_rx_ring_std(struct bge_softc *);
419static void bge_free_rx_ring_std(struct bge_softc *);
420static int bge_init_rx_ring_jumbo(struct bge_softc *);
421static void bge_free_rx_ring_jumbo(struct bge_softc *);
422static void bge_free_tx_ring(struct bge_softc *);
423static int bge_init_tx_ring(struct bge_softc *);
424
425static int bge_chipinit(struct bge_softc *);
426static int bge_blockinit(struct bge_softc *);
427
428static int bge_has_eaddr(struct bge_softc *);
429static uint32_t bge_readmem_ind(struct bge_softc *, int);
430static void bge_writemem_ind(struct bge_softc *, int, int);
431static void bge_writembx(struct bge_softc *, int, int);
432#ifdef notdef
433static uint32_t bge_readreg_ind(struct bge_softc *, int);
434#endif
435static void bge_writemem_direct(struct bge_softc *, int, int);
436static void bge_writereg_ind(struct bge_softc *, int, int);
437
438static int bge_miibus_readreg(device_t, int, int);
439static int bge_miibus_writereg(device_t, int, int, int);
440static void bge_miibus_statchg(device_t);
441#ifdef DEVICE_POLLING
442static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
443#endif
444
445#define BGE_RESET_START 1
446#define BGE_RESET_STOP 2
447static void bge_sig_post_reset(struct bge_softc *, int);
448static void bge_sig_legacy(struct bge_softc *, int);
449static void bge_sig_pre_reset(struct bge_softc *, int);
450static void bge_stop_fw(struct bge_softc *);
451static int bge_reset(struct bge_softc *);
452static void bge_link_upd(struct bge_softc *);
453
454/*
455 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
456 * leak information to untrusted users. It is also known to cause alignment
457 * traps on certain architectures.
458 */
459#ifdef BGE_REGISTER_DEBUG
460static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
461static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
462static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
463#endif
464static void bge_add_sysctls(struct bge_softc *);
465static void bge_add_sysctl_stats_regs(struct bge_softc *,
466 struct sysctl_ctx_list *, struct sysctl_oid_list *);
467static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
468 struct sysctl_oid_list *);
469static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
470
471static device_method_t bge_methods[] = {
472 /* Device interface */
473 DEVMETHOD(device_probe, bge_probe),
474 DEVMETHOD(device_attach, bge_attach),
475 DEVMETHOD(device_detach, bge_detach),
476 DEVMETHOD(device_shutdown, bge_shutdown),
477 DEVMETHOD(device_suspend, bge_suspend),
478 DEVMETHOD(device_resume, bge_resume),
479
480 /* bus interface */
481 DEVMETHOD(bus_print_child, bus_generic_print_child),
482 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
483
484 /* MII interface */
485 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
486 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
487 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
488
489 { 0, 0 }
490};
491
492static driver_t bge_driver = {
493 "bge",
494 bge_methods,
495 sizeof(struct bge_softc)
496};
497
498static devclass_t bge_devclass;
499
500DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
501DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
502
503static int bge_allow_asf = 1;
504
505TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
506
507SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
508SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
509 "Allow ASF mode if available");
510
511#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
512#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
513#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
514#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
515#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
516
517static int
518bge_has_eaddr(struct bge_softc *sc)
519{
520#ifdef __sparc64__
521 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
522 device_t dev;
523 uint32_t subvendor;
524
525 dev = sc->bge_dev;
526
527 /*
528 * The on-board BGEs found in sun4u machines aren't fitted with
529 * an EEPROM which means that we have to obtain the MAC address
530 * via OFW and that some tests will always fail. We distinguish
531 * such BGEs by the subvendor ID, which also has to be obtained
532 * from OFW instead of the PCI configuration space as the latter
533 * indicates Broadcom as the subvendor of the netboot interface.
534 * For early Blade 1500 and 2500 we even have to check the OFW
535 * device path as the subvendor ID always defaults to Broadcom
536 * there.
537 */
538 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
539 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
540 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
541 return (0);
542 memset(buf, 0, sizeof(buf));
543 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
544 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
545 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
546 return (0);
547 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
548 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
549 return (0);
550 }
551#endif
552 return (1);
553}
554
555static uint32_t
556bge_readmem_ind(struct bge_softc *sc, int off)
557{
558 device_t dev;
559 uint32_t val;
560
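	/*
	 * The BCM5906 apparently does not expose the statistics block and
	 * send ring area through the PCI memory window; skip that range.
	 */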
561 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
562 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
563 return (0);
564
565 dev = sc->bge_dev;
566
567 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
568 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
569 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
570 return (val);
571}
572
573static void
574bge_writemem_ind(struct bge_softc *sc, int off, int val)
575{
576 device_t dev;
577
578 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
579 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
580 return;
581
582 dev = sc->bge_dev;
583
584 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
585 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
586 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
587}
588
589#ifdef notdef
590static uint32_t
591bge_readreg_ind(struct bge_softc *sc, int off)
592{
593 device_t dev;
594
595 dev = sc->bge_dev;
596
597 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
598 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
599}
600#endif
601
602static void
603bge_writereg_ind(struct bge_softc *sc, int off, int val)
604{
605 device_t dev;
606
607 dev = sc->bge_dev;
608
609 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
610 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
611}
612
613static void
614bge_writemem_direct(struct bge_softc *sc, int off, int val)
615{
616 CSR_WRITE_4(sc, off, val);
617}
618
619static void
620bge_writembx(struct bge_softc *sc, int off, int val)
621{
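	/* The BCM5906 uses the low priority mailbox register range instead. */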
622 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
623 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
624
625 CSR_WRITE_4(sc, off, val);
626}
627
628/*
629 * Map a single buffer address.
630 */
631
632static void
633bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
634{
635 struct bge_dmamap_arg *ctx;
636
637 if (error)
638 return;
639
640 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
641
642 ctx = arg;
643 ctx->bge_busaddr = segs->ds_addr;
644}
645
646static uint8_t
647bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
648{
649 uint32_t access, byte = 0;
650 int i;
651
652 /* Lock. */
653 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
654 for (i = 0; i < 8000; i++) {
655 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
656 break;
657 DELAY(20);
658 }
659 if (i == 8000)
660 return (1);
661
662 /* Enable access. */
663 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
664 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
665
666 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
667 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
668 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
669 DELAY(10);
670 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
671 DELAY(10);
672 break;
673 }
674 }
675
676 if (i == BGE_TIMEOUT * 10) {
677 if_printf(sc->bge_ifp, "nvram read timed out\n");
678 return (1);
679 }
680
681 /* Get result. */
682 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
683
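	/* The NVRAM word is big-endian; swap it and shift out the addressed byte. */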
684 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
685
686 /* Disable access. */
687 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
688
689 /* Unlock. */
690 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
691 CSR_READ_4(sc, BGE_NVRAM_SWARB);
692
693 return (0);
694}
695
696/*
697 * Read a sequence of bytes from NVRAM.
698 */
699static int
700bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
701{
702 int err = 0, i;
703 uint8_t byte = 0;
704
705 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
706 return (1);
707
708 for (i = 0; i < cnt; i++) {
709 err = bge_nvram_getbyte(sc, off + i, &byte);
710 if (err)
711 break;
712 *(dest + i) = byte;
713 }
714
715 return (err ? 1 : 0);
716}
717
718/*
719 * Read a byte of data stored in the EEPROM at address 'addr.' The
720 * BCM570x supports both the traditional bitbang interface and an
721 * auto access interface for reading the EEPROM. We use the auto
722 * access method.
723 */
724static uint8_t
725bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
726{
727 int i;
728 uint32_t byte = 0;
729
730 /*
731 * Enable use of auto EEPROM access so we can avoid
732 * having to use the bitbang method.
733 */
734 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
735
736 /* Reset the EEPROM, load the clock period. */
737 CSR_WRITE_4(sc, BGE_EE_ADDR,
738 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
739 DELAY(20);
740
741 /* Issue the read EEPROM command. */
742 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
743
744 /* Wait for completion */
 745	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
746 DELAY(10);
747 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
748 break;
749 }
750
751 if (i == BGE_TIMEOUT * 10) {
752 device_printf(sc->bge_dev, "EEPROM read timed out\n");
753 return (1);
754 }
755
756 /* Get result. */
757 byte = CSR_READ_4(sc, BGE_EE_DATA);
758
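	/* Pick the addressed byte out of the 32-bit EEPROM data word. */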
759 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
760
761 return (0);
762}
763
764/*
765 * Read a sequence of bytes from the EEPROM.
766 */
767static int
768bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
769{
770 int i, error = 0;
771 uint8_t byte = 0;
772
773 for (i = 0; i < cnt; i++) {
774 error = bge_eeprom_getbyte(sc, off + i, &byte);
775 if (error)
776 break;
777 *(dest + i) = byte;
778 }
779
780 return (error ? 1 : 0);
781}
782
783static int
784bge_miibus_readreg(device_t dev, int phy, int reg)
785{
786 struct bge_softc *sc;
787 uint32_t val;
788 int i;
789
790 sc = device_get_softc(dev);
791
792 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
793 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
794 CSR_WRITE_4(sc, BGE_MI_MODE,
795 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
796 DELAY(80);
797 }
798
799 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
800 BGE_MIPHY(phy) | BGE_MIREG(reg));
801
802 /* Poll for the PHY register access to complete. */
803 for (i = 0; i < BGE_TIMEOUT; i++) {
804 DELAY(10);
805 val = CSR_READ_4(sc, BGE_MI_COMM);
806 if ((val & BGE_MICOMM_BUSY) == 0) {
807 DELAY(5);
808 val = CSR_READ_4(sc, BGE_MI_COMM);
809 break;
810 }
811 }
812
813 if (i == BGE_TIMEOUT) {
814 device_printf(sc->bge_dev,
815 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
816 phy, reg, val);
817 val = 0;
818 }
819
820 /* Restore the autopoll bit if necessary. */
821 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
822 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
823 DELAY(80);
824 }
825
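	/* A read the PHY flagged as failed carries no valid data; report 0. */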
826 if (val & BGE_MICOMM_READFAIL)
827 return (0);
828
829 return (val & 0xFFFF);
830}
831
832static int
833bge_miibus_writereg(device_t dev, int phy, int reg, int val)
834{
835 struct bge_softc *sc;
836 int i;
837
838 sc = device_get_softc(dev);
839
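	/*
	 * The BCM5906 apparently does not accept writes to these PHY
	 * registers, so silently drop them.
	 */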
840 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
841 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
842 return (0);
843
844 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
845 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
846 CSR_WRITE_4(sc, BGE_MI_MODE,
847 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
848 DELAY(80);
849 }
850
851 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
852 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
853
854 for (i = 0; i < BGE_TIMEOUT; i++) {
855 DELAY(10);
856 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
857 DELAY(5);
858 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
859 break;
860 }
861 }
862
863 /* Restore the autopoll bit if necessary. */
864 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
865 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
866 DELAY(80);
867 }
868
869 if (i == BGE_TIMEOUT)
870 device_printf(sc->bge_dev,
871 "PHY write timed out (phy %d, reg %d, val %d)\n",
872 phy, reg, val);
873
874 return (0);
875}
876
877static void
878bge_miibus_statchg(device_t dev)
879{
880 struct bge_softc *sc;
881 struct mii_data *mii;
882 sc = device_get_softc(dev);
883 mii = device_get_softc(sc->bge_miibus);
884
885 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
886 (IFM_ACTIVE | IFM_AVALID)) {
887 switch (IFM_SUBTYPE(mii->mii_media_active)) {
888 case IFM_10_T:
889 case IFM_100_TX:
890 sc->bge_link = 1;
891 break;
892 case IFM_1000_T:
893 case IFM_1000_SX:
894 case IFM_2500_SX:
895 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
896 sc->bge_link = 1;
897 else
898 sc->bge_link = 0;
899 break;
900 default:
901 sc->bge_link = 0;
902 break;
903 }
904 } else
905 sc->bge_link = 0;
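	/* Without a link there is nothing to program into the MAC mode. */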
906 if (sc->bge_link == 0)
907 return;
908 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
909 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
910 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
911 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
912 else
913 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
914
 915	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
916 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
136 * ID burned into it, though it will always be overriden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
220
221 { SK_VENDORID, SK_DEVICEID_ALTIMA },
222
223 { TC_VENDORID, TC_DEVICEID_3C996 },
224
225 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
226 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
227 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
228
229 { 0, 0 }
230};
231
232static const struct bge_vendor {
233 uint16_t v_id;
234 const char *v_name;
235} bge_vendors[] = {
236 { ALTEON_VENDORID, "Alteon" },
237 { ALTIMA_VENDORID, "Altima" },
238 { APPLE_VENDORID, "Apple" },
239 { BCOM_VENDORID, "Broadcom" },
240 { SK_VENDORID, "SysKonnect" },
241 { TC_VENDORID, "3Com" },
242 { FJTSU_VENDORID, "Fujitsu" },
243
244 { 0, NULL }
245};
246
247static const struct bge_revision {
248 uint32_t br_chipid;
249 const char *br_name;
250} bge_revisions[] = {
251 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
252 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
253 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
254 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
255 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
256 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
257 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
258 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
259 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
260 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
261 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
262 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
263 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
264 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
265 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
266 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
267 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
268 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
269 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
270 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
271 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
272 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
273 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
274 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
275 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
276 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
277 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
278 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
279 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
280 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
281 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
282 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
283 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
284 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
285 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
286 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
287 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
288 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
289 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
290 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
291 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
292 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
293 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
294 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
295 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
296 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
297 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
298 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
299 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
300 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
301 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
302 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
303 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
304 /* 5754 and 5787 share the same ASIC ID */
305 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
306 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
307 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
308 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
309 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
310 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
311 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
312
313 { 0, NULL }
314};
315
316/*
317 * Some defaults for major revisions, so that newer steppings
318 * that we don't know about have a shot at working.
319 */
320static const struct bge_revision bge_majorrevs[] = {
321 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
322 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
323 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
324 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
325 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
326 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
327 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
328 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
329 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
330 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
331 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
332 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
333 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
334 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
335 /* 5754 and 5787 share the same ASIC ID */
336 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
337 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
338 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
339 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
340
341 { 0, NULL }
342};
343
344#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
345#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
346#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
347#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
348#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
349#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
350#define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
351
352const struct bge_revision * bge_lookup_rev(uint32_t);
353const struct bge_vendor * bge_lookup_vendor(uint16_t);
354
355typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
356
357static int bge_probe(device_t);
358static int bge_attach(device_t);
359static int bge_detach(device_t);
360static int bge_suspend(device_t);
361static int bge_resume(device_t);
362static void bge_release_resources(struct bge_softc *);
363static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
364static int bge_dma_alloc(struct bge_softc *);
365static void bge_dma_free(struct bge_softc *);
366static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
367 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
368
369static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
370static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
371static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
372static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
373static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
374
375static void bge_txeof(struct bge_softc *, uint16_t);
376static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
377static int bge_rxeof(struct bge_softc *, uint16_t, int);
378
379static void bge_asf_driver_up (struct bge_softc *);
380static void bge_tick(void *);
381static void bge_stats_clear_regs(struct bge_softc *);
382static void bge_stats_update(struct bge_softc *);
383static void bge_stats_update_regs(struct bge_softc *);
384static struct mbuf *bge_check_short_dma(struct mbuf *);
385static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
386 uint16_t *, uint16_t *);
387static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
388
389static void bge_intr(void *);
390static int bge_msi_intr(void *);
391static void bge_intr_task(void *, int);
392static void bge_start_locked(struct ifnet *);
393static void bge_start(struct ifnet *);
394static int bge_ioctl(struct ifnet *, u_long, caddr_t);
395static void bge_init_locked(struct bge_softc *);
396static void bge_init(void *);
397static void bge_stop(struct bge_softc *);
398static void bge_watchdog(struct bge_softc *);
399static int bge_shutdown(device_t);
400static int bge_ifmedia_upd_locked(struct ifnet *);
401static int bge_ifmedia_upd(struct ifnet *);
402static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
403
404static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
405static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
406
407static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
408static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
409
410static void bge_setpromisc(struct bge_softc *);
411static void bge_setmulti(struct bge_softc *);
412static void bge_setvlan(struct bge_softc *);
413
414static __inline void bge_rxreuse_std(struct bge_softc *, int);
415static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
416static int bge_newbuf_std(struct bge_softc *, int);
417static int bge_newbuf_jumbo(struct bge_softc *, int);
418static int bge_init_rx_ring_std(struct bge_softc *);
419static void bge_free_rx_ring_std(struct bge_softc *);
420static int bge_init_rx_ring_jumbo(struct bge_softc *);
421static void bge_free_rx_ring_jumbo(struct bge_softc *);
422static void bge_free_tx_ring(struct bge_softc *);
423static int bge_init_tx_ring(struct bge_softc *);
424
425static int bge_chipinit(struct bge_softc *);
426static int bge_blockinit(struct bge_softc *);
427
428static int bge_has_eaddr(struct bge_softc *);
429static uint32_t bge_readmem_ind(struct bge_softc *, int);
430static void bge_writemem_ind(struct bge_softc *, int, int);
431static void bge_writembx(struct bge_softc *, int, int);
432#ifdef notdef
433static uint32_t bge_readreg_ind(struct bge_softc *, int);
434#endif
435static void bge_writemem_direct(struct bge_softc *, int, int);
436static void bge_writereg_ind(struct bge_softc *, int, int);
437
438static int bge_miibus_readreg(device_t, int, int);
439static int bge_miibus_writereg(device_t, int, int, int);
440static void bge_miibus_statchg(device_t);
441#ifdef DEVICE_POLLING
442static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
443#endif
444
445#define BGE_RESET_START 1
446#define BGE_RESET_STOP 2
447static void bge_sig_post_reset(struct bge_softc *, int);
448static void bge_sig_legacy(struct bge_softc *, int);
449static void bge_sig_pre_reset(struct bge_softc *, int);
450static void bge_stop_fw(struct bge_softc *);
451static int bge_reset(struct bge_softc *);
452static void bge_link_upd(struct bge_softc *);
453
454/*
455 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
456 * leak information to untrusted users. It is also known to cause alignment
457 * traps on certain architectures.
458 */
459#ifdef BGE_REGISTER_DEBUG
460static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
461static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
462static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
463#endif
464static void bge_add_sysctls(struct bge_softc *);
465static void bge_add_sysctl_stats_regs(struct bge_softc *,
466 struct sysctl_ctx_list *, struct sysctl_oid_list *);
467static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
468 struct sysctl_oid_list *);
469static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
470
471static device_method_t bge_methods[] = {
472 /* Device interface */
473 DEVMETHOD(device_probe, bge_probe),
474 DEVMETHOD(device_attach, bge_attach),
475 DEVMETHOD(device_detach, bge_detach),
476 DEVMETHOD(device_shutdown, bge_shutdown),
477 DEVMETHOD(device_suspend, bge_suspend),
478 DEVMETHOD(device_resume, bge_resume),
479
480 /* bus interface */
481 DEVMETHOD(bus_print_child, bus_generic_print_child),
482 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
483
484 /* MII interface */
485 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
486 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
487 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
488
489 { 0, 0 }
490};
491
492static driver_t bge_driver = {
493 "bge",
494 bge_methods,
495 sizeof(struct bge_softc)
496};
497
498static devclass_t bge_devclass;
499
500DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
501DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
502
503static int bge_allow_asf = 1;
504
505TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
506
507SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
508SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
509 "Allow ASF mode if available");
510
511#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
512#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
513#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
514#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
515#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
516
517static int
518bge_has_eaddr(struct bge_softc *sc)
519{
520#ifdef __sparc64__
521 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
522 device_t dev;
523 uint32_t subvendor;
524
525 dev = sc->bge_dev;
526
527 /*
528 * The on-board BGEs found in sun4u machines aren't fitted with
529 * an EEPROM which means that we have to obtain the MAC address
530 * via OFW and that some tests will always fail. We distinguish
531 * such BGEs by the subvendor ID, which also has to be obtained
532 * from OFW instead of the PCI configuration space as the latter
533 * indicates Broadcom as the subvendor of the netboot interface.
534 * For early Blade 1500 and 2500 we even have to check the OFW
535 * device path as the subvendor ID always defaults to Broadcom
536 * there.
537 */
538 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
539 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
540 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
541 return (0);
542 memset(buf, 0, sizeof(buf));
543 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
544 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
545 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
546 return (0);
547 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
548 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
549 return (0);
550 }
551#endif
552 return (1);
553}
554
555static uint32_t
556bge_readmem_ind(struct bge_softc *sc, int off)
557{
558 device_t dev;
559 uint32_t val;
560
561 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
562 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
563 return (0);
564
565 dev = sc->bge_dev;
566
567 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
568 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
569 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
570 return (val);
571}
572
573static void
574bge_writemem_ind(struct bge_softc *sc, int off, int val)
575{
576 device_t dev;
577
578 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
579 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
580 return;
581
582 dev = sc->bge_dev;
583
584 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
585 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
586 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
587}
588
589#ifdef notdef
590static uint32_t
591bge_readreg_ind(struct bge_softc *sc, int off)
592{
593 device_t dev;
594
595 dev = sc->bge_dev;
596
597 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
598 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
599}
600#endif
601
602static void
603bge_writereg_ind(struct bge_softc *sc, int off, int val)
604{
605 device_t dev;
606
607 dev = sc->bge_dev;
608
609 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
610 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
611}
612
613static void
614bge_writemem_direct(struct bge_softc *sc, int off, int val)
615{
616 CSR_WRITE_4(sc, off, val);
617}
618
619static void
620bge_writembx(struct bge_softc *sc, int off, int val)
621{
622 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
623 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
624
625 CSR_WRITE_4(sc, off, val);
626}
627
628/*
629 * Map a single buffer address.
630 */
631
632static void
633bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
634{
635 struct bge_dmamap_arg *ctx;
636
637 if (error)
638 return;
639
640 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
641
642 ctx = arg;
643 ctx->bge_busaddr = segs->ds_addr;
644}
645
646static uint8_t
647bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
648{
649 uint32_t access, byte = 0;
650 int i;
651
652 /* Lock. */
653 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
654 for (i = 0; i < 8000; i++) {
655 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
656 break;
657 DELAY(20);
658 }
659 if (i == 8000)
660 return (1);
661
662 /* Enable access. */
663 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
664 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
665
666 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
667 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
668 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
669 DELAY(10);
670 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
671 DELAY(10);
672 break;
673 }
674 }
675
676 if (i == BGE_TIMEOUT * 10) {
677 if_printf(sc->bge_ifp, "nvram read timed out\n");
678 return (1);
679 }
680
681 /* Get result. */
682 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
683
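	/*
	 * Editor's note: the NVRAM interface returns the aligned 32-bit
	 * word containing 'addr'; the byte swap and shift below then pick
	 * out the single byte lane selected by addr % 4.
	 */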
684 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
685
686 /* Disable access. */
687 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
688
689 /* Unlock. */
690 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
691 CSR_READ_4(sc, BGE_NVRAM_SWARB);
692
693 return (0);
694}
695
696/*
697 * Read a sequence of bytes from NVRAM.
698 */
699static int
700bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
701{
702 int err = 0, i;
703 uint8_t byte = 0;
704
705 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
706 return (1);
707
708 for (i = 0; i < cnt; i++) {
709 err = bge_nvram_getbyte(sc, off + i, &byte);
710 if (err)
711 break;
712 *(dest + i) = byte;
713 }
714
715 return (err ? 1 : 0);
716}
717
718/*
719 * Read a byte of data stored in the EEPROM at address 'addr.' The
720 * BCM570x supports both the traditional bitbang interface and an
721 * auto access interface for reading the EEPROM. We use the auto
722 * access method.
723 */
724static uint8_t
725bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
726{
727 int i;
728 uint32_t byte = 0;
729
730 /*
731 * Enable use of auto EEPROM access so we can avoid
732 * having to use the bitbang method.
733 */
734 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
735
736 /* Reset the EEPROM, load the clock period. */
737 CSR_WRITE_4(sc, BGE_EE_ADDR,
738 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
739 DELAY(20);
740
741 /* Issue the read EEPROM command. */
742 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
743
744 /* Wait for completion */
745 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
746 DELAY(10);
747 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
748 break;
749 }
750
751 if (i == BGE_TIMEOUT * 10) {
752 device_printf(sc->bge_dev, "EEPROM read timed out\n");
753 return (1);
754 }
755
756 /* Get result. */
757 byte = CSR_READ_4(sc, BGE_EE_DATA);
758
759 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
760
761 return (0);
762}
763
764/*
765 * Read a sequence of bytes from the EEPROM.
766 */
767static int
768bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
769{
770 int i, error = 0;
771 uint8_t byte = 0;
772
773 for (i = 0; i < cnt; i++) {
774 error = bge_eeprom_getbyte(sc, off + i, &byte);
775 if (error)
776 break;
777 *(dest + i) = byte;
778 }
779
780 return (error ? 1 : 0);
781}
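/*
 * Editor's note: a minimal usage sketch of the EEPROM reader above (not
 * part of the driver); the offset and length are purely illustrative.
 */
#ifdef notdef
static void
bge_eeprom_dump_head(struct bge_softc *sc)
{
	uint8_t buf[16];
	int i;

	if (bge_read_eeprom(sc, (caddr_t)buf, 0, sizeof(buf)) != 0) {
		device_printf(sc->bge_dev, "EEPROM read failed\n");
		return;
	}
	for (i = 0; i < (int)sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");
}
#endif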
782
783static int
784bge_miibus_readreg(device_t dev, int phy, int reg)
785{
786 struct bge_softc *sc;
787 uint32_t val;
788 int i;
789
790 sc = device_get_softc(dev);
791
792 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
793 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
794 CSR_WRITE_4(sc, BGE_MI_MODE,
795 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
796 DELAY(80);
797 }
798
799 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
800 BGE_MIPHY(phy) | BGE_MIREG(reg));
801
802 /* Poll for the PHY register access to complete. */
803 for (i = 0; i < BGE_TIMEOUT; i++) {
804 DELAY(10);
805 val = CSR_READ_4(sc, BGE_MI_COMM);
806 if ((val & BGE_MICOMM_BUSY) == 0) {
807 DELAY(5);
808 val = CSR_READ_4(sc, BGE_MI_COMM);
809 break;
810 }
811 }
812
813 if (i == BGE_TIMEOUT) {
814 device_printf(sc->bge_dev,
815 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
816 phy, reg, val);
817 val = 0;
818 }
819
820 /* Restore the autopoll bit if necessary. */
821 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
822 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
823 DELAY(80);
824 }
825
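	/*
	 * Editor's note: the controller flags a failed read with
	 * BGE_MICOMM_READFAIL; return 0 rather than whatever is left in
	 * the data field.
	 */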
826 if (val & BGE_MICOMM_READFAIL)
827 return (0);
828
829 return (val & 0xFFFF);
830}
831
832static int
833bge_miibus_writereg(device_t dev, int phy, int reg, int val)
834{
835 struct bge_softc *sc;
836 int i;
837
838 sc = device_get_softc(dev);
839
840 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
841 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
842 return (0);
843
844 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
845 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
846 CSR_WRITE_4(sc, BGE_MI_MODE,
847 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
848 DELAY(80);
849 }
850
851 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
852 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
853
854 for (i = 0; i < BGE_TIMEOUT; i++) {
855 DELAY(10);
856 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
857 DELAY(5);
858 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
859 break;
860 }
861 }
862
863 /* Restore the autopoll bit if necessary. */
864 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
865 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
866 DELAY(80);
867 }
868
869 if (i == BGE_TIMEOUT)
870 device_printf(sc->bge_dev,
871 "PHY write timed out (phy %d, reg %d, val %d)\n",
872 phy, reg, val);
873
874 return (0);
875}
876
877static void
878bge_miibus_statchg(device_t dev)
879{
880 struct bge_softc *sc;
881 struct mii_data *mii;
882 sc = device_get_softc(dev);
883 mii = device_get_softc(sc->bge_miibus);
884
885 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
886 (IFM_ACTIVE | IFM_AVALID)) {
887 switch (IFM_SUBTYPE(mii->mii_media_active)) {
888 case IFM_10_T:
889 case IFM_100_TX:
890 sc->bge_link = 1;
891 break;
892 case IFM_1000_T:
893 case IFM_1000_SX:
894 case IFM_2500_SX:
895 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
896 sc->bge_link = 1;
897 else
898 sc->bge_link = 0;
899 break;
900 default:
901 sc->bge_link = 0;
902 break;
903 }
904 } else
905 sc->bge_link = 0;
906 if (sc->bge_link == 0)
907 return;
908 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
909 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
910 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
911 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
912 else
913 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
914
915 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
916 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
917 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
918 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
919 else
920 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
921 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
922 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
923 else
924 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
925 } else {
926 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
927 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
928 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
929 }
930}
931
932/*
933 * Initialize a standard receive ring descriptor.
934 */
935static int
936bge_newbuf_std(struct bge_softc *sc, int i)
937{
938 struct mbuf *m;
939 struct bge_rx_bd *r;
940 bus_dma_segment_t segs[1];
941 bus_dmamap_t map;
942 int error, nsegs;
943
944 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
945 if (m == NULL)
946 return (ENOBUFS);
947 m->m_len = m->m_pkthdr.len = MCLBYTES;
948 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
949 m_adj(m, ETHER_ALIGN);
950
951 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
952 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
953 if (error != 0) {
954 m_freem(m);
955 return (error);
956 }
957 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
958 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
959 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
960 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
961 sc->bge_cdata.bge_rx_std_dmamap[i]);
962 }
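	/*
	 * Editor's note: the new mbuf was loaded into the spare DMA map
	 * above, so a load failure leaves the old buffer untouched; only
	 * on success are the spare map and the slot's own map swapped.
	 */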
963 map = sc->bge_cdata.bge_rx_std_dmamap[i];
964 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
965 sc->bge_cdata.bge_rx_std_sparemap = map;
966 sc->bge_cdata.bge_rx_std_chain[i] = m;
967 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
968 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
969 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
970 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
971 r->bge_flags = BGE_RXBDFLAG_END;
972 r->bge_len = segs[0].ds_len;
973 r->bge_idx = i;
974
975 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
976 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
977
978 return (0);
979}
980
981/*
982 * Initialize a jumbo receive ring descriptor. This allocates
983 * a jumbo buffer from the pool managed internally by the driver.
984 */
985static int
986bge_newbuf_jumbo(struct bge_softc *sc, int i)
987{
988 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
989 bus_dmamap_t map;
990 struct bge_extrx_bd *r;
991 struct mbuf *m;
992 int error, nsegs;
993
994 MGETHDR(m, M_DONTWAIT, MT_DATA);
995 if (m == NULL)
996 return (ENOBUFS);
997
998 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
999 if (!(m->m_flags & M_EXT)) {
1000 m_freem(m);
1001 return (ENOBUFS);
1002 }
1003 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1004 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1005 m_adj(m, ETHER_ALIGN);
1006
1007 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1008 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1009 if (error != 0) {
1010 m_freem(m);
1011 return (error);
1012 }
1013
1014 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1015 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1016 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1017 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1018 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1019 }
1020 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1021 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1022 sc->bge_cdata.bge_rx_jumbo_sparemap;
1023 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1024 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1025 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1026 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1027 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1028 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1029
1030 /*
1031 * Fill in the extended RX buffer descriptor.
1032 */
1033 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1034 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1035 r->bge_idx = i;
1036 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1037 switch (nsegs) {
1038 case 4:
1039 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1040 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1041 r->bge_len3 = segs[3].ds_len;
1042 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1043 case 3:
1044 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1045 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1046 r->bge_len2 = segs[2].ds_len;
1047 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1048 case 2:
1049 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1050 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1051 r->bge_len1 = segs[1].ds_len;
1052 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1053 case 1:
1054 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1055 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1056 r->bge_len0 = segs[0].ds_len;
1057 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1058 break;
1059 default:
1060 panic("%s: %d segments\n", __func__, nsegs);
1061 }
1062
1063 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1064 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1065
1066 return (0);
1067}
1068
1069static int
1070bge_init_rx_ring_std(struct bge_softc *sc)
1071{
1072 int error, i;
1073
1074 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1075 sc->bge_std = 0;
1076 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1077 if ((error = bge_newbuf_std(sc, i)) != 0)
1078 return (error);
1079 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1080 }
1081
1082 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1083 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1084
1085 sc->bge_std = 0;
1086 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1087
1088 return (0);
1089}
1090
1091static void
1092bge_free_rx_ring_std(struct bge_softc *sc)
1093{
1094 int i;
1095
1096 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1097 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1098 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1099 sc->bge_cdata.bge_rx_std_dmamap[i],
1100 BUS_DMASYNC_POSTREAD);
1101 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1102 sc->bge_cdata.bge_rx_std_dmamap[i]);
1103 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1104 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1105 }
1106 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1107 sizeof(struct bge_rx_bd));
1108 }
1109}
1110
1111static int
1112bge_init_rx_ring_jumbo(struct bge_softc *sc)
1113{
1114 struct bge_rcb *rcb;
1115 int error, i;
1116
1117 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1118 sc->bge_jumbo = 0;
1119 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1120 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1121 return (error);
1122 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1123 }
1124
1125 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1126 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1127
1128 sc->bge_jumbo = 0;
1129
1130 /* Enable the jumbo receive producer ring. */
1131 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1132 rcb->bge_maxlen_flags =
1133 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1134 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1135
1136 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1137
1138 return (0);
1139}
1140
1141static void
1142bge_free_rx_ring_jumbo(struct bge_softc *sc)
1143{
1144 int i;
1145
1146 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1147 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1148 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1149 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1150 BUS_DMASYNC_POSTREAD);
1151 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1152 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1153 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1154 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1155 }
1156 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1157 sizeof(struct bge_extrx_bd));
1158 }
1159}
1160
1161static void
1162bge_free_tx_ring(struct bge_softc *sc)
1163{
1164 int i;
1165
1166 if (sc->bge_ldata.bge_tx_ring == NULL)
1167 return;
1168
1169 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1170 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1171 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1172 sc->bge_cdata.bge_tx_dmamap[i],
1173 BUS_DMASYNC_POSTWRITE);
1174 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1175 sc->bge_cdata.bge_tx_dmamap[i]);
1176 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1177 sc->bge_cdata.bge_tx_chain[i] = NULL;
1178 }
1179 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1180 sizeof(struct bge_tx_bd));
1181 }
1182}
1183
1184static int
1185bge_init_tx_ring(struct bge_softc *sc)
1186{
1187 sc->bge_txcnt = 0;
1188 sc->bge_tx_saved_considx = 0;
1189
1190 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1191 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1192 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1193
1194 /* Initialize transmit producer index for host-memory send ring. */
1195 sc->bge_tx_prodidx = 0;
1196 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1197
1198 /* 5700 b2 errata */
1199 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1200 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1201
1202 /* NIC-memory send ring not used; initialize to zero. */
1203 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1204 /* 5700 b2 errata */
1205 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1206 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1207
1208 return (0);
1209}
1210
1211static void
1212bge_setpromisc(struct bge_softc *sc)
1213{
1214 struct ifnet *ifp;
1215
1216 BGE_LOCK_ASSERT(sc);
1217
1218 ifp = sc->bge_ifp;
1219
1220 /* Enable or disable promiscuous mode as needed. */
1221 if (ifp->if_flags & IFF_PROMISC)
1222 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1223 else
1224 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1225}
1226
1227static void
1228bge_setmulti(struct bge_softc *sc)
1229{
1230 struct ifnet *ifp;
1231 struct ifmultiaddr *ifma;
1232 uint32_t hashes[4] = { 0, 0, 0, 0 };
1233 int h, i;
1234
1235 BGE_LOCK_ASSERT(sc);
1236
1237 ifp = sc->bge_ifp;
1238
1239 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1240 for (i = 0; i < 4; i++)
1241 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1242 return;
1243 }
1244
1245 /* First, zot all the existing filters. */
1246 for (i = 0; i < 4; i++)
1247 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1248
1249 /* Now program new ones. */
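	/*
	 * Editor's note: the filter is a 128-bit hash table spread across
	 * the four BGE_MAR registers.  The low 7 bits of the little-endian
	 * CRC32 of each address pick one bit: bits 6-5 select the 32-bit
	 * register and bits 4-0 the bit position within it.
	 */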
1250 if_maddr_rlock(ifp);
1251 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1252 if (ifma->ifma_addr->sa_family != AF_LINK)
1253 continue;
1254 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1255 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1256 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1257 }
1258 if_maddr_runlock(ifp);
1259
1260 for (i = 0; i < 4; i++)
1261 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1262}
1263
1264static void
1265bge_setvlan(struct bge_softc *sc)
1266{
1267 struct ifnet *ifp;
1268
1269 BGE_LOCK_ASSERT(sc);
1270
1271 ifp = sc->bge_ifp;
1272
1273 /* Enable or disable VLAN tag stripping as needed. */
1274 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1275 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1276 else
1277 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1278}
1279
1280static void
1281bge_sig_pre_reset(struct bge_softc *sc, int type)
1282{
1283
1284 /*
1285 * Some chips don't like this, so only do it if ASF is enabled.
1286 */
1287 if (sc->bge_asf_mode)
1288 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1289
1290 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1291 switch (type) {
1292 case BGE_RESET_START:
1293 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1294 break;
1295 case BGE_RESET_STOP:
1296 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1297 break;
1298 }
1299 }
1300}
1301
1302static void
1303bge_sig_post_reset(struct bge_softc *sc, int type)
1304{
1305
1306 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1307 switch (type) {
1308 case BGE_RESET_START:
1309 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1310 /* START DONE */
1311 break;
1312 case BGE_RESET_STOP:
1313 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1314 break;
1315 }
1316 }
1317}
1318
1319static void
1320bge_sig_legacy(struct bge_softc *sc, int type)
1321{
1322
1323 if (sc->bge_asf_mode) {
1324 switch (type) {
1325 case BGE_RESET_START:
1326 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1327 break;
1328 case BGE_RESET_STOP:
1329 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1330 break;
1331 }
1332 }
1333}
1334
1335static void
1336bge_stop_fw(struct bge_softc *sc)
1337{
1338 int i;
1339
1340 if (sc->bge_asf_mode) {
1341 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1342 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1343 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1344
1345 for (i = 0; i < 100; i++ ) {
1346 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1347 break;
1348 DELAY(10);
1349 }
1350 }
1351}
1352
1353/*
1354 * Do endian, PCI and DMA initialization.
1355 */
1356static int
1357bge_chipinit(struct bge_softc *sc)
1358{
1359 uint32_t dma_rw_ctl, misc_ctl;
1360 uint16_t val;
1361 int i;
1362
1363 /* Set endianness before we access any non-PCI registers. */
1364 misc_ctl = BGE_INIT;
1365 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1366 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1367 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1368
1369 /* Clear the MAC control register */
1370 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1371
1372 /*
1373 * Clear the MAC statistics block in the NIC's
1374 * internal memory.
1375 */
1376 for (i = BGE_STATS_BLOCK;
1377 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1378 BGE_MEMWIN_WRITE(sc, i, 0);
1379
1380 for (i = BGE_STATUS_BLOCK;
1381 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1382 BGE_MEMWIN_WRITE(sc, i, 0);
1383
1384 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1385 /*
1386 * Fix data corruption caused by non-qword write with WB.
1387 * Fix master abort in PCI mode.
1388 * Fix PCI latency timer.
1389 */
1390 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1391 val |= (1 << 10) | (1 << 12) | (1 << 13);
1392 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1393 }
1394
1395 /*
1396 * Set up the PCI DMA control register.
1397 */
1398 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1399 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1400 if (sc->bge_flags & BGE_FLAG_PCIE) {
1401 /* Read watermark not used, 128 bytes for write. */
1402 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1403 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1404 if (BGE_IS_5714_FAMILY(sc)) {
1405 /* 256 bytes for read and write. */
1406 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1407 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1408 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1409 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1410 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1411 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1412 /*
1413 * In the BCM5703, the DMA read watermark should
1414 * be set to less than or equal to the maximum
1415 * memory read byte count of the PCI-X command
1416 * register.
1417 */
1418 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1419 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1420 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1421 /* 1536 bytes for read, 384 bytes for write. */
1422 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1423 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1424 } else {
1425 /* 384 bytes for read and write. */
1426 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1427 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1428 0x0F;
1429 }
1430 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1431 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1432 uint32_t tmp;
1433
1434 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1435 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1436 if (tmp == 6 || tmp == 7)
1437 dma_rw_ctl |=
1438 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1439
1440 /* Set PCI-X DMA write workaround. */
1441 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1442 }
1443 } else {
1444 /* Conventional PCI bus: 256 bytes for read and write. */
1445 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1446 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1447
1448 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1449 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1450 dma_rw_ctl |= 0x0F;
1451 }
1452 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1453 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1454 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1455 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1456 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1457 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1458 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1459 if (BGE_IS_5717_PLUS(sc))
1460 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1461 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1462
1463 /*
1464 * Set up general mode register.
1465 */
1466 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1467 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1468 BGE_MODECTL_TX_NO_PHDR_CSUM);
1469
1470 /*
1471 * BCM5701 B5 has a bug causing data corruption when using
1472 * 64-bit DMA reads, which can be terminated early and then
1473 * completed later as 32-bit accesses, in combination with
1474 * certain bridges.
1475 */
1476 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1477 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1478 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1479
1480 /*
1481 * Tell the firmware the driver is running
1482 */
1483 if (sc->bge_asf_mode & ASF_STACKUP)
1484 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1485
1486 /*
1487 * Disable memory write invalidate. Apparently it is not supported
1488 * properly by these devices. Also ensure that INTx isn't disabled,
1489 * as these chips need it even when using MSI.
1490 */
1491 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1492 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1493
1494 /* Set the timer prescaler (always 66MHz) */
1495 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1496
1497 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1498 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1499 DELAY(40); /* XXX */
1500
1501 /* Put PHY into ready state */
1502 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1503 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1504 DELAY(40);
1505 }
1506
1507 return (0);
1508}
1509
1510static int
1511bge_blockinit(struct bge_softc *sc)
1512{
1513 struct bge_rcb *rcb;
1514 bus_size_t vrcb;
1515 bge_hostaddr taddr;
1516 uint32_t val;
1517 int i, limit;
1518
1519 /*
1520 * Initialize the memory window pointer register so that
1521 * we can access the first 32K of internal NIC RAM. This will
1522 * allow us to set up the TX send ring RCBs and the RX return
1523 * ring RCBs, plus other things which live in NIC memory.
1524 */
1525 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1526
1527 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1528
1529 if (!(BGE_IS_5705_PLUS(sc))) {
1530 /* Configure mbuf memory pool */
1531 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1532 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1533 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1534 else
1535 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1536
1537 /* Configure DMA resource pool */
1538 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1539 BGE_DMA_DESCRIPTORS);
1540 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1541 }
1542
1543 /* Configure mbuf pool watermarks */
1544 if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
1545 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1546 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1547 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1548 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1549 } else {
1550 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1551 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1552 }
1553 } else if (!BGE_IS_5705_PLUS(sc)) {
1554 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1555 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1556 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1557 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1558 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1559 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1560 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1561 } else {
1562 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1563 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1564 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1565 }
1566
1567 /* Configure DMA resource watermarks */
1568 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1569 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1570
1571 /* Enable buffer manager */
1572 if (!(BGE_IS_5705_PLUS(sc))) {
1573 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1574 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1575
1576 /* Poll for buffer manager start indication */
1577 for (i = 0; i < BGE_TIMEOUT; i++) {
1578 DELAY(10);
1579 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1580 break;
1581 }
1582
1583 if (i == BGE_TIMEOUT) {
1584 device_printf(sc->bge_dev,
1585 "buffer manager failed to start\n");
1586 return (ENXIO);
1587 }
1588 }
1589
1590 /* Enable flow-through queues */
1591 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1592 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1593
1594 /* Wait until queue initialization is complete */
1595 for (i = 0; i < BGE_TIMEOUT; i++) {
1596 DELAY(10);
1597 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1598 break;
1599 }
1600
1601 if (i == BGE_TIMEOUT) {
1602 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1603 return (ENXIO);
1604 }
1605
1606 /*
1607 * Summary of rings supported by the controller:
1608 *
1609 * Standard Receive Producer Ring
1610 * - This ring is used to feed receive buffers for "standard"
1611 * sized frames (typically 1536 bytes) to the controller.
1612 *
1613 * Jumbo Receive Producer Ring
1614 * - This ring is used to feed receive buffers for jumbo sized
1615 * frames (i.e. anything bigger than the "standard" frames)
1616 * to the controller.
1617 *
1618 * Mini Receive Producer Ring
1619 * - This ring is used to feed receive buffers for "mini"
1620 * sized frames to the controller.
1621 * - This feature required external memory for the controller
1622 * but was never used in a production system. Should always
1623 * be disabled.
1624 *
1625 * Receive Return Ring
1626 * - After the controller has placed an incoming frame into a
1627 * receive buffer that buffer is moved into a receive return
1628 * ring. The driver is then responsible for passing the
1629 * buffer up to the stack. Many versions of the controller
1630 * support multiple RR rings.
1631 *
1632 * Send Ring
1633 * - This ring is used for outgoing frames. Many versions of
1634 * the controller support multiple send rings.
1635 */
1636
1637 /* Initialize the standard receive producer ring control block. */
1638 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1639 rcb->bge_hostaddr.bge_addr_lo =
1640 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1641 rcb->bge_hostaddr.bge_addr_hi =
1642 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1643 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1644 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1645 if (BGE_IS_5717_PLUS(sc)) {
1646 /*
1647 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1648 * Bits 15-2 : Maximum RX frame size
1649 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1650 * Bit 0 : Reserved
1651 */
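		/*
		 * Editor's note: BGE_RCB_MAXLEN_FLAGS() packs its first
		 * argument into the upper 16 bits and its second into the
		 * lower 16 bits, so the statement below requests a
		 * 512-entry ring and encodes the maximum frame length in
		 * the bits 15-2 field described above.
		 */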
1652 rcb->bge_maxlen_flags =
1653 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1654 } else if (BGE_IS_5705_PLUS(sc)) {
1655 /*
1656 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1657 * Bits 15-2 : Reserved (should be 0)
1658 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1659 * Bit 0 : Reserved
1660 */
1661 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1662 } else {
1663 /*
1664 * Ring size is always XXX entries
1665 * Bits 31-16: Maximum RX frame size
1666 * Bits 15-2 : Reserved (should be 0)
1667 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1668 * Bit 0 : Reserved
1669 */
1670 rcb->bge_maxlen_flags =
1671 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1672 }
1673 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1674 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1675 else
1676 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1677 /* Write the standard receive producer ring control block. */
1678 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1679 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1680 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1681 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1682
1683 /* Reset the standard receive producer ring producer index. */
1684 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1685
1686 /*
1687 * Initialize the jumbo RX producer ring control
1688 * block. We set the 'ring disabled' bit in the
1689 * flags field until we're actually ready to start
1690 * using this ring (i.e. once we set the MTU
1691 * high enough to require it).
1692 */
1693 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1694 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1695 /* Get the jumbo receive producer ring RCB parameters. */
1696 rcb->bge_hostaddr.bge_addr_lo =
1697 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1698 rcb->bge_hostaddr.bge_addr_hi =
1699 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1700 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1701 sc->bge_cdata.bge_rx_jumbo_ring_map,
1702 BUS_DMASYNC_PREREAD);
1703 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1704 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1705 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1706 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1707 else
1708 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1709 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1710 rcb->bge_hostaddr.bge_addr_hi);
1711 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1712 rcb->bge_hostaddr.bge_addr_lo);
1713 /* Program the jumbo receive producer ring RCB parameters. */
1714 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1715 rcb->bge_maxlen_flags);
1716 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1717 /* Reset the jumbo receive producer ring producer index. */
1718 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1719 }
1720
1721 /* Disable the mini receive producer ring RCB. */
1722 if (BGE_IS_5700_FAMILY(sc)) {
1723 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1724 rcb->bge_maxlen_flags =
1725 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1726 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1727 rcb->bge_maxlen_flags);
1728 /* Reset the mini receive producer ring producer index. */
1729 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1730 }
1731
1732 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1733 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1734 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1735 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1736 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1737 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1738 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1739 }
1740 /*
1741 * The BD ring replenish thresholds control how often the
1742 * hardware fetches new BD's from the producer rings in host
1743 * memory. Setting the value too low on a busy system can
1744 * starve the hardware and recue the throughpout.
1745 *
1746 * Set the BD ring replentish thresholds. The recommended
1747 * values are 1/8th the number of descriptors allocated to
1748 * each ring.
1749 * XXX The 5754 requires a lower threshold, so it might be a
1750 * requirement of all 575x family chips. The Linux driver sets
1751 * the lower threshold for all 5705 family chips as well, but there
1752 * are reports that it might not need to be so strict.
1753 *
1754 * XXX Linux does some extra fiddling here for the 5906 parts as
1755 * well.
1756 */
1757 if (BGE_IS_5705_PLUS(sc))
1758 val = 8;
1759 else
1760 val = BGE_STD_RX_RING_CNT / 8;
1761 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1762 if (BGE_IS_JUMBO_CAPABLE(sc))
1763 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1764 BGE_JUMBO_RX_RING_CNT/8);
1765 if (BGE_IS_5717_PLUS(sc)) {
1766 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1767 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1768 }
1769
1770 /*
1771 * Disable all send rings by setting the 'ring disabled' bit
1772 * in the flags field of all the TX send ring control blocks,
1773 * located in NIC memory.
1774 */
1775 if (!BGE_IS_5705_PLUS(sc))
1776 /* 5700 to 5704 had 16 send rings. */
1777 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1778 else
1779 limit = 1;
1780 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1781 for (i = 0; i < limit; i++) {
1782 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1783 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1784 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1785 vrcb += sizeof(struct bge_rcb);
1786 }
1787
1788 /* Configure send ring RCB 0 (we use only the first ring) */
1789 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1790 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1791 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1792 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1793 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1794 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1795 else
1796 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1797 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1798 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1799 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1800
1801 /*
1802 * Disable all receive return rings by setting the
1803 * 'ring disabled' bit in the flags field of all the receive
1804 * return ring control blocks, located in NIC memory.
1805 */
1806 if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
1807 /* Should be 17, use 16 until we get an SRAM map. */
1808 limit = 16;
1809 } else if (!BGE_IS_5705_PLUS(sc))
1810 limit = BGE_RX_RINGS_MAX;
1811 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
1812 limit = 4;
1813 else
1814 limit = 1;
1815 /* Disable all receive return rings. */
1816 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1817 for (i = 0; i < limit; i++) {
1818 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1819 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1820 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1821 BGE_RCB_FLAG_RING_DISABLED);
1822 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1823 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1824 (i * (sizeof(uint64_t))), 0);
1825 vrcb += sizeof(struct bge_rcb);
1826 }
1827
1828 /*
1829 * Set up receive return ring 0. Note that the NIC address
1830 * for RX return rings is 0x0. The return rings live entirely
1831 * within the host, so the nicaddr field in the RCB isn't used.
1832 */
1833 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1834 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1835 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1836 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1837 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1838 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1839 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1840
1841 /* Set random backoff seed for TX */
1842 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1843 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1844 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1845 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1846 BGE_TX_BACKOFF_SEED_MASK);
1847
1848 /* Set inter-packet gap */
1849 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1850
1851 /*
1852 * Specify which ring to use for packets that don't match
1853 * any RX rules.
1854 */
1855 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1856
1857 /*
1858 * Configure number of RX lists. One interrupt distribution
1859 * list, sixteen active lists, one bad frames class.
1860 */
1861 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1862
1863 /* Initialize RX list placement stats mask. */
1864 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1865 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1866
1867 /* Disable host coalescing until we get it set up */
1868 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1869
1870 /* Poll to make sure it's shut down. */
1871 for (i = 0; i < BGE_TIMEOUT; i++) {
1872 DELAY(10);
1873 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1874 break;
1875 }
1876
1877 if (i == BGE_TIMEOUT) {
1878 device_printf(sc->bge_dev,
1879 "host coalescing engine failed to idle\n");
1880 return (ENXIO);
1881 }
1882
1883 /* Set up host coalescing defaults */
1884 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1885 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1886 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1887 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1888 if (!(BGE_IS_5705_PLUS(sc))) {
1889 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1890 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1891 }
1892 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1893 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1894
1895 /* Set up address of statistics block */
1896 if (!(BGE_IS_5705_PLUS(sc))) {
1897 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1898 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1899 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1900 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1901 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1902 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1903 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1904 }
1905
1906 /* Set up address of status block */
1907 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1908 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1909 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1910 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1911
1912 /* Set up status block size. */
1913 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1914 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1915 val = BGE_STATBLKSZ_FULL;
1916 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1917 } else {
1918 val = BGE_STATBLKSZ_32BYTE;
1919 bzero(sc->bge_ldata.bge_status_block, 32);
1920 }
1921 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1922 sc->bge_cdata.bge_status_map,
1923 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1924
1925 /* Turn on host coalescing state machine */
1926 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1927
1928 /* Turn on RX BD completion state machine and enable attentions */
1929 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1930 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1931
1932 /* Turn on RX list placement state machine */
1933 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1934
1935 /* Turn on RX list selector state machine. */
1936 if (!(BGE_IS_5705_PLUS(sc)))
1937 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1938
1939 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1940 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1941 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1942 BGE_MACMODE_FRMHDR_DMA_ENB;
1943
1944 if (sc->bge_flags & BGE_FLAG_TBI)
1945 val |= BGE_PORTMODE_TBI;
1946 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1947 val |= BGE_PORTMODE_GMII;
1948 else
1949 val |= BGE_PORTMODE_MII;
1950
1951 /* Turn on DMA, clear stats */
1952 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1953
1954 /* Set misc. local control, enable interrupts on attentions */
1955 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1956
1957#ifdef notdef
1958 /* Assert GPIO pins for PHY reset */
1959 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1960 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1961 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1962 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1963#endif
1964
1965 /* Turn on DMA completion state machine */
1966 if (!(BGE_IS_5705_PLUS(sc)))
1967 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1968
1969 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1970
1971 /* Enable host coalescing bug fix. */
1972 if (BGE_IS_5755_PLUS(sc))
1973 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1974
1975 /* Request larger DMA burst size to get better performance. */
1976 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
1977 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1978
1979 /* Turn on write DMA state machine */
1980 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1981 DELAY(40);
1982
1983 /* Turn on read DMA state machine */
1984 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1985
1986 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1987 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
1988
1989 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1990 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1991 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1992 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1993 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1994 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1995 if (sc->bge_flags & BGE_FLAG_PCIE)
1996 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1997 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
1998 val |= BGE_RDMAMODE_TSO4_ENABLE;
1999 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2000 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2001 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2002 val |= BGE_RDMAMODE_TSO6_ENABLE;
2003 }
2004 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2005 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2006 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2007 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2008 BGE_IS_5717_PLUS(sc)) {
2009 /*
2010 * Enable fix for read DMA FIFO overruns.
2011 * The fix is to limit the number of RX BDs
2012 * the hardware would fetch at a time.
2013 */
2014 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
2015 CSR_READ_4(sc, BGE_RDMA_RSRVCTRL) |
2016 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2017 }
2018 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2019 DELAY(40);
2020
2021 /* Turn on RX data completion state machine */
2022 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2023
2024 /* Turn on RX BD initiator state machine */
2025 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2026
2027 /* Turn on RX data and RX BD initiator state machine */
2028 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2029
2030 /* Turn on Mbuf cluster free state machine */
2031 if (!(BGE_IS_5705_PLUS(sc)))
2032 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2033
2034 /* Turn on send BD completion state machine */
2035 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2036
2037 /* Turn on send data completion state machine */
2038 val = BGE_SDCMODE_ENABLE;
2039 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2040 val |= BGE_SDCMODE_CDELAY;
2041 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2042
2043 /* Turn on send data initiator state machine */
2044 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2045 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2046 BGE_SDIMODE_HW_LSO_PRE_DMA);
2047 else
2048 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2049
2050 /* Turn on send BD initiator state machine */
2051 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2052
2053 /* Turn on send BD selector state machine */
2054 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2055
2056 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2057 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2058 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2059
2060 /* ack/clear link change events */
2061 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2062 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2063 BGE_MACSTAT_LINK_CHANGED);
2064 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2065
2066 /*
2067 * Enable attention when the link has changed state for
2068 * devices that use auto polling.
2069 */
2070 if (sc->bge_flags & BGE_FLAG_TBI) {
2071 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2072 } else {
2073 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2074 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2075 DELAY(80);
2076 }
2077 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2078 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2079 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2080 BGE_EVTENB_MI_INTERRUPT);
2081 }
2082
2083 /*
2084 * Clear any pending link state attention.
2085 * Otherwise some link state change events may be lost until attention
2086 * is cleared by bge_intr() -> bge_link_upd() sequence.
2087 * It's not necessary on newer BCM chips - perhaps enabling link
2088 * state change attentions implies clearing pending attention.
2089 */
2090 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2091 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2092 BGE_MACSTAT_LINK_CHANGED);
2093
2094 /* Enable link state change attentions. */
2095 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2096
2097 return (0);
2098}
2099
2100const struct bge_revision *
2101bge_lookup_rev(uint32_t chipid)
2102{
2103 const struct bge_revision *br;
2104
2105 for (br = bge_revisions; br->br_name != NULL; br++) {
2106 if (br->br_chipid == chipid)
2107 return (br);
2108 }
2109
2110 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2111 if (br->br_chipid == BGE_ASICREV(chipid))
2112 return (br);
2113 }
2114
2115 return (NULL);
2116}
2117
2118const struct bge_vendor *
2119bge_lookup_vendor(uint16_t vid)
2120{
2121 const struct bge_vendor *v;
2122
2123 for (v = bge_vendors; v->v_name != NULL; v++)
2124 if (v->v_id == vid)
2125 return (v);
2126
2127 panic("%s: unknown vendor %d", __func__, vid);
2128 return (NULL);
2129}
2130
2131/*
2132 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2133 * against our list and return its name if we find a match.
2134 *
2135 * Note that since the Broadcom controller contains VPD support, we
2136 * try to get the device name string from the controller itself instead
2137 * of the compiled-in string. It guarantees we'll always announce the
2138 * right product name. We fall back to the compiled-in string when
2139 * VPD is unavailable or corrupt.
2140 */
2141static int
2142bge_probe(device_t dev)
2143{
2144 const struct bge_type *t = bge_devs;
2145 struct bge_softc *sc = device_get_softc(dev);
2146 uint16_t vid, did;
2147
2148 sc->bge_dev = dev;
2149 vid = pci_get_vendor(dev);
2150 did = pci_get_device(dev);
2151 while(t->bge_vid != 0) {
2152 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2153 char model[64], buf[96];
2154 const struct bge_revision *br;
2155 const struct bge_vendor *v;
2156 uint32_t id;
2157
2158 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2159 BGE_PCIMISCCTL_ASICREV_SHIFT;
2160 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2161 /*
2162 * Find the ASIC revision. Different chips
2163 * use different registers.
2164 */
2165 switch (pci_get_device(dev)) {
2166 case BCOM_DEVICEID_BCM5717:
2167 case BCOM_DEVICEID_BCM5718:
2168 id = pci_read_config(dev,
2169 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2170 break;
2171 default:
2172 id = pci_read_config(dev,
2173 BGE_PCI_PRODID_ASICREV, 4);
2174 }
2175 }
2176 br = bge_lookup_rev(id);
2177 v = bge_lookup_vendor(vid);
2178 {
2179#if __FreeBSD_version > 700024
2180 const char *pname;
2181
2182 if (bge_has_eaddr(sc) &&
2183 pci_get_vpd_ident(dev, &pname) == 0)
2184 snprintf(model, 64, "%s", pname);
2185 else
2186#endif
2187 snprintf(model, 64, "%s %s",
2188 v->v_name,
2189 br != NULL ? br->br_name :
2190 "NetXtreme Ethernet Controller");
2191 }
2192 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2193 br != NULL ? "" : "unknown ", id);
2194 device_set_desc_copy(dev, buf);
2195 return (0);
2196 }
2197 t++;
2198 }
2199
2200 return (ENXIO);
2201}
2202
2203static void
2204bge_dma_free(struct bge_softc *sc)
2205{
2206 int i;
2207
2208 /* Destroy DMA maps for RX buffers. */
2209 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2210 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2211 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2212 sc->bge_cdata.bge_rx_std_dmamap[i]);
2213 }
2214 if (sc->bge_cdata.bge_rx_std_sparemap)
2215 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2216 sc->bge_cdata.bge_rx_std_sparemap);
2217
2218 /* Destroy DMA maps for jumbo RX buffers. */
2219 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2220 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2221 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2222 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2223 }
2224 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2225 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2226 sc->bge_cdata.bge_rx_jumbo_sparemap);
2227
2228 /* Destroy DMA maps for TX buffers. */
2229 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2230 if (sc->bge_cdata.bge_tx_dmamap[i])
2231 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2232 sc->bge_cdata.bge_tx_dmamap[i]);
2233 }
2234
2235 if (sc->bge_cdata.bge_rx_mtag)
2236 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2237 if (sc->bge_cdata.bge_tx_mtag)
2238 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2239
2240
2241 /* Destroy standard RX ring. */
2242 if (sc->bge_cdata.bge_rx_std_ring_map)
2243 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2244 sc->bge_cdata.bge_rx_std_ring_map);
2245 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2246 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2247 sc->bge_ldata.bge_rx_std_ring,
2248 sc->bge_cdata.bge_rx_std_ring_map);
2249
2250 if (sc->bge_cdata.bge_rx_std_ring_tag)
2251 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2252
2253 /* Destroy jumbo RX ring. */
2254 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2255 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2256 sc->bge_cdata.bge_rx_jumbo_ring_map);
2257
2258 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2259 sc->bge_ldata.bge_rx_jumbo_ring)
2260 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2261 sc->bge_ldata.bge_rx_jumbo_ring,
2262 sc->bge_cdata.bge_rx_jumbo_ring_map);
2263
2264 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2265 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2266
2267 /* Destroy RX return ring. */
2268 if (sc->bge_cdata.bge_rx_return_ring_map)
2269 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2270 sc->bge_cdata.bge_rx_return_ring_map);
2271
2272 if (sc->bge_cdata.bge_rx_return_ring_map &&
2273 sc->bge_ldata.bge_rx_return_ring)
2274 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2275 sc->bge_ldata.bge_rx_return_ring,
2276 sc->bge_cdata.bge_rx_return_ring_map);
2277
2278 if (sc->bge_cdata.bge_rx_return_ring_tag)
2279 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2280
2281 /* Destroy TX ring. */
2282 if (sc->bge_cdata.bge_tx_ring_map)
2283 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2284 sc->bge_cdata.bge_tx_ring_map);
2285
2286 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2287 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2288 sc->bge_ldata.bge_tx_ring,
2289 sc->bge_cdata.bge_tx_ring_map);
2290
2291 if (sc->bge_cdata.bge_tx_ring_tag)
2292 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2293
2294 /* Destroy status block. */
2295 if (sc->bge_cdata.bge_status_map)
2296 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2297 sc->bge_cdata.bge_status_map);
2298
2299 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2300 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2301 sc->bge_ldata.bge_status_block,
2302 sc->bge_cdata.bge_status_map);
2303
2304 if (sc->bge_cdata.bge_status_tag)
2305 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2306
2307 /* Destroy statistics block. */
2308 if (sc->bge_cdata.bge_stats_map)
2309 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2310 sc->bge_cdata.bge_stats_map);
2311
2312 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2313 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2314 sc->bge_ldata.bge_stats,
2315 sc->bge_cdata.bge_stats_map);
2316
2317 if (sc->bge_cdata.bge_stats_tag)
2318 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2319
2320 if (sc->bge_cdata.bge_buffer_tag)
2321 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2322
2323 /* Destroy the parent tag. */
2324 if (sc->bge_cdata.bge_parent_tag)
2325 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2326}
2327
2328static int
2329bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2330 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2331 bus_addr_t *paddr, const char *msg)
2332{
2333 struct bge_dmamap_arg ctx;
2334 bus_addr_t lowaddr;
2335 bus_size_t ring_end;
2336 int error;
2337
2338 lowaddr = BUS_SPACE_MAXADDR;
2339again:
2340 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2341 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2342 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2343 if (error != 0) {
2344 device_printf(sc->bge_dev,
2345 "could not create %s dma tag\n", msg);
2346 return (ENOMEM);
2347 }
2348 /* Allocate DMA'able memory for ring. */
2349 error = bus_dmamem_alloc(*tag, (void **)ring,
2350 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2351 if (error != 0) {
2352 device_printf(sc->bge_dev,
2353 "could not allocate DMA'able memory for %s\n", msg);
2354 return (ENOMEM);
2355 }
2356 /* Load the address of the ring. */
2357 ctx.bge_busaddr = 0;
2358 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2359 &ctx, BUS_DMA_NOWAIT);
2360 if (error != 0) {
2361 device_printf(sc->bge_dev,
2362 "could not load DMA'able memory for %s\n", msg);
2363 return (ENOMEM);
2364 }
2365 *paddr = ctx.bge_busaddr;
2366 ring_end = *paddr + maxsize;
2367 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2368 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2369 /*
2370 * 4GB boundary crossed. Limit maximum allowable DMA
2371 * address space to 32bit and try again.
2372 */
2373 bus_dmamap_unload(*tag, *map);
2374 bus_dmamem_free(*tag, *ring, *map);
2375 bus_dma_tag_destroy(*tag);
2376 if (bootverbose)
2377 device_printf(sc->bge_dev, "4GB boundary crossed, "
2378 "limit DMA address space to 32bit for %s\n", msg);
2379 *ring = NULL;
2380 *tag = NULL;
2381 *map = NULL;
2382 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2383 goto again;
2384 }
2385 return (0);
2386}
2387
2388static int
2389bge_dma_alloc(struct bge_softc *sc)
2390{
2391 bus_addr_t lowaddr;
2392 bus_size_t boundary, sbsz, txsegsz, txmaxsegsz;
2393 int i, error;
2394
2395 lowaddr = BUS_SPACE_MAXADDR;
2396 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2397 lowaddr = BGE_DMA_MAXADDR;
2398 /*
2399 * Allocate the parent bus DMA tag appropriate for PCI.
2400 */
2401 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2402 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2403 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2404 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2405 if (error != 0) {
2406 device_printf(sc->bge_dev,
2407 "could not allocate parent dma tag\n");
2408 return (ENOMEM);
2409 }
2410
2411 /* Create tag for standard RX ring. */
2412 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2413 &sc->bge_cdata.bge_rx_std_ring_tag,
2414 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2415 &sc->bge_cdata.bge_rx_std_ring_map,
2416 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2417 if (error)
2418 return (error);
2419
2420 /* Create tag for RX return ring. */
2421 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2422 &sc->bge_cdata.bge_rx_return_ring_tag,
2423 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2424 &sc->bge_cdata.bge_rx_return_ring_map,
2425 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2426 if (error)
2427 return (error);
2428
2429 /* Create tag for TX ring. */
2430 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2431 &sc->bge_cdata.bge_tx_ring_tag,
2432 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2433 &sc->bge_cdata.bge_tx_ring_map,
2434 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2435 if (error)
2436 return (error);
2437
2438 /*
2439 * Create tag for status block.
2440	 * Because we use a single Tx/Rx/Rx return ring, use the
2441	 * minimum status block size, except on BCM5700 AX/BX which
2442	 * seem to want to see the full status block size regardless
2443	 * of the configured number of rings.
2444 */
2445 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2446 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2447 sbsz = BGE_STATUS_BLK_SZ;
2448 else
2449 sbsz = 32;
2450 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2451 &sc->bge_cdata.bge_status_tag,
2452 (uint8_t **)&sc->bge_ldata.bge_status_block,
2453 &sc->bge_cdata.bge_status_map,
2454 &sc->bge_ldata.bge_status_block_paddr, "status block");
2455 if (error)
2456 return (error);
2457
2458 /* Create tag for statistics block. */
2459 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2460 &sc->bge_cdata.bge_stats_tag,
2461 (uint8_t **)&sc->bge_ldata.bge_stats,
2462 &sc->bge_cdata.bge_stats_map,
2463 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2464 if (error)
2465 return (error);
2466
2467 /* Create tag for jumbo RX ring. */
2468 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2469 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2470 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2471 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2472 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2473 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2474 if (error)
2475 return (error);
2476 }
2477
2478 /* Create parent tag for buffers. */
2479 boundary = 0;
2480 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0)
2481 boundary = BGE_DMA_BNDRY;
2482 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2483 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2484 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2485 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2486 if (error != 0) {
2487 device_printf(sc->bge_dev,
2488 "could not allocate buffer dma tag\n");
2489 return (ENOMEM);
2490 }
2491 /* Create tag for Tx mbufs. */
2492 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2493 txsegsz = BGE_TSOSEG_SZ;
2494 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2495 } else {
2496 txsegsz = MCLBYTES;
2497 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2498 }
2499 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2500 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2501 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2502 &sc->bge_cdata.bge_tx_mtag);
2503
2504 if (error) {
2505 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2506 return (ENOMEM);
2507 }
2508
2509 /* Create tag for Rx mbufs. */
2510 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2511 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
2512 MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2513
2514 if (error) {
2515 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2516 return (ENOMEM);
2517 }
2518
2519 /* Create DMA maps for RX buffers. */
2520 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2521 &sc->bge_cdata.bge_rx_std_sparemap);
2522 if (error) {
2523 device_printf(sc->bge_dev,
2524 "can't create spare DMA map for RX\n");
2525 return (ENOMEM);
2526 }
2527 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2528 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2529 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2530 if (error) {
2531 device_printf(sc->bge_dev,
2532 "can't create DMA map for RX\n");
2533 return (ENOMEM);
2534 }
2535 }
2536
2537 /* Create DMA maps for TX buffers. */
2538 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2539 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2540 &sc->bge_cdata.bge_tx_dmamap[i]);
2541 if (error) {
2542 device_printf(sc->bge_dev,
2543 "can't create DMA map for TX\n");
2544 return (ENOMEM);
2545 }
2546 }
2547
2548 /* Create tags for jumbo RX buffers. */
2549 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2550 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2551 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2552 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2553 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2554 if (error) {
2555 device_printf(sc->bge_dev,
2556 "could not allocate jumbo dma tag\n");
2557 return (ENOMEM);
2558 }
2559 /* Create DMA maps for jumbo RX buffers. */
2560 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2561 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2562 if (error) {
2563 device_printf(sc->bge_dev,
2564 "can't create spare DMA map for jumbo RX\n");
2565 return (ENOMEM);
2566 }
2567 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2568 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2569 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2570 if (error) {
2571 device_printf(sc->bge_dev,
2572 "can't create DMA map for jumbo RX\n");
2573 return (ENOMEM);
2574 }
2575 }
2576 }
2577
2578 return (0);
2579}
2580
2581/*
2582 * Return true if this device has more than one port.
2583 */
2584static int
2585bge_has_multiple_ports(struct bge_softc *sc)
2586{
2587 device_t dev = sc->bge_dev;
2588 u_int b, d, f, fscan, s;
2589
2590 d = pci_get_domain(dev);
2591 b = pci_get_bus(dev);
2592 s = pci_get_slot(dev);
2593 f = pci_get_function(dev);
2594 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2595 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2596 return (1);
2597 return (0);
2598}
2599
2600/*
2601 * Return true if MSI can be used with this device.
2602 */
2603static int
2604bge_can_use_msi(struct bge_softc *sc)
2605{
2606 int can_use_msi = 0;
2607
2608 /* Disable MSI for polling(4). */
2609#ifdef DEVICE_POLLING
2610 return (0);
2611#endif
2612 switch (sc->bge_asicrev) {
2613 case BGE_ASICREV_BCM5714_A0:
2614 case BGE_ASICREV_BCM5714:
2615 /*
2616 * Apparently, MSI doesn't work when these chips are
2617 * configured in single-port mode.
2618 */
2619 if (bge_has_multiple_ports(sc))
2620 can_use_msi = 1;
2621 break;
2622 case BGE_ASICREV_BCM5750:
2623 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2624 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2625 can_use_msi = 1;
2626 break;
2627 default:
2628 if (BGE_IS_575X_PLUS(sc))
2629 can_use_msi = 1;
2630 }
2631 return (can_use_msi);
2632}
2633
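/*
 * Attach routine: map the device registers, identify the chip, allocate
 * interrupt and DMA resources, read the station address and set up the
 * ifnet structure.
 */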
2634static int
2635bge_attach(device_t dev)
2636{
2637 struct ifnet *ifp;
2638 struct bge_softc *sc;
2639 uint32_t hwcfg = 0, misccfg;
2640 u_char eaddr[ETHER_ADDR_LEN];
2641 int error, f, msicount, phy_addr, reg, rid, trys;
2642
2643 sc = device_get_softc(dev);
2644 sc->bge_dev = dev;
2645
2646 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2647
2648 /*
2649 * Map control/status registers.
2650 */
2651 pci_enable_busmaster(dev);
2652
2653 rid = PCIR_BAR(0);
2654 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2655 RF_ACTIVE);
2656
2657 if (sc->bge_res == NULL) {
2658 device_printf (sc->bge_dev, "couldn't map memory\n");
2659 error = ENXIO;
2660 goto fail;
2661 }
2662
2663 /* Save various chip information. */
2664 sc->bge_chipid =
2665 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2666 BGE_PCIMISCCTL_ASICREV_SHIFT;
2667 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2668 /*
2669		 * Find the ASIC revision. Different chips use different
2670 * registers.
2671 */
2672 switch (pci_get_device(dev)) {
2673 case BCOM_DEVICEID_BCM5717:
2674 case BCOM_DEVICEID_BCM5718:
2675 sc->bge_chipid = pci_read_config(dev,
2676 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2677 break;
2678 default:
2679 sc->bge_chipid = pci_read_config(dev,
2680 BGE_PCI_PRODID_ASICREV, 4);
2681 }
2682 }
2683 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2684 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2685
2686 /* Set default PHY address. */
2687 phy_addr = 1;
2688 /*
2689 * PHY address mapping for various devices.
2690 *
2691 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2692 * ---------+-------+-------+-------+-------+
2693 * BCM57XX | 1 | X | X | X |
2694 * BCM5704 | 1 | X | 1 | X |
2695 * BCM5717 | 1 | 8 | 2 | 9 |
2696 *
2697 * Other addresses may respond but they are not
2698 * IEEE compliant PHYs and should be ignored.
2699 */
2700 if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
2701 f = pci_get_function(dev);
2702 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2703 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2704 BGE_SGDIGSTS_IS_SERDES)
2705 phy_addr = f + 8;
2706 else
2707 phy_addr = f + 1;
2708 } else if (sc->bge_chipid == BGE_CHIPID_BCM5717_B0) {
2709 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2710 BGE_CPMU_PHY_STRAP_IS_SERDES)
2711 phy_addr = f + 8;
2712 else
2713 phy_addr = f + 1;
2714 }
2715 }
2716
2717 /*
2718 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2719 * 5705 A0 and A1 chips.
2720 */
2721 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2722 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2723 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2724 sc->bge_chipid != BGE_CHIPID_BCM5705_A1 &&
2725 !BGE_IS_5717_PLUS(sc))
2726 sc->bge_phy_flags |= BGE_PHY_WIRESPEED;
2727
2728 if (bge_has_eaddr(sc))
2729 sc->bge_flags |= BGE_FLAG_EADDR;
2730
2731 /* Save chipset family. */
2732 switch (sc->bge_asicrev) {
2733 case BGE_ASICREV_BCM5717:
2734 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2735 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2736 BGE_FLAG_SHORT_DMA_BUG | BGE_FLAG_JUMBO_FRAME;
2737 break;
2738 case BGE_ASICREV_BCM5755:
2739 case BGE_ASICREV_BCM5761:
2740 case BGE_ASICREV_BCM5784:
2741 case BGE_ASICREV_BCM5785:
2742 case BGE_ASICREV_BCM5787:
2743 case BGE_ASICREV_BCM57780:
2744 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2745 BGE_FLAG_5705_PLUS;
2746 break;
2747 case BGE_ASICREV_BCM5700:
2748 case BGE_ASICREV_BCM5701:
2749 case BGE_ASICREV_BCM5703:
2750 case BGE_ASICREV_BCM5704:
2751 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2752 break;
2753 case BGE_ASICREV_BCM5714_A0:
2754 case BGE_ASICREV_BCM5780:
2755 case BGE_ASICREV_BCM5714:
2756 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2757 /* FALLTHROUGH */
2758 case BGE_ASICREV_BCM5750:
2759 case BGE_ASICREV_BCM5752:
2760 case BGE_ASICREV_BCM5906:
2761 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2762 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
2763 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2764 /* FALLTHROUGH */
2765 case BGE_ASICREV_BCM5705:
2766 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2767 break;
2768 }
2769
2770 /* Set various PHY bug flags. */
2771 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2772 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2773 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2774 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2775 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2776 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2777 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2778 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2779 if (pci_get_subvendor(dev) == DELL_VENDORID)
2780 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2781 if ((BGE_IS_5705_PLUS(sc)) &&
2782 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2783 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
2784 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2785 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2786 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2787 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2788 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2789 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2790 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2791 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2792 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2793 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2794 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2795 } else
2796 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2797 }
2798
2799	/* Identify the chips that use a CPMU. */
2800 if (BGE_IS_5717_PLUS(sc) ||
2801 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2802 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2803 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2804 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2805 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2806 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2807 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2808 else
2809 sc->bge_mi_mode = BGE_MIMODE_BASE;
2810 /* Enable auto polling for BCM570[0-5]. */
2811 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2812 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2813
2814 /*
2815	 * All controllers that are not 5755 or higher have the 4GB
2816	 * boundary DMA bug.
2817	 * Whenever an address crosses a multiple of the 4GB boundary
2818	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2819	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2820	 * state machine will lock up and cause the device to hang.
2821 */
2822 if (BGE_IS_5755_PLUS(sc) == 0)
2823 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2824
2825 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2826 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2827 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2828 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2829 sc->bge_flags |= BGE_FLAG_5788;
2830 }
2831
2832 /*
2833	 * Some controllers seem to require special firmware to use
2834	 * TSO. But the firmware is not available to FreeBSD, and Linux
2835	 * claims that the TSO performed by the firmware is slower than
2836	 * hardware-based TSO. Moreover the firmware-based TSO has one
2837	 * known bug: it can't handle TSO if the ethernet header plus
2838	 * IP/TCP header is greater than 80 bytes. A workaround for the
2839	 * TSO bug exists but it seems more expensive than not using
2840	 * TSO at all. Some hardware also has the TSO bug, so limit
2841	 * TSO to the controllers that are not affected by TSO issues
2842	 * (e.g. 5755 or higher).
2843 */
2844 if (BGE_IS_5717_PLUS(sc)) {
2845 /* BCM5717 requires different TSO configuration. */
2846 sc->bge_flags |= BGE_FLAG_TSO3;
2847 } else if (BGE_IS_5755_PLUS(sc)) {
2848 /*
2849		 * BCM5754 and BCM5787 share the same ASIC ID, so an
2850		 * explicit device ID check is required.
2851		 * For an unknown reason, TSO does not work on BCM5755M.
2852 */
2853 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2854 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2855 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
2856 sc->bge_flags |= BGE_FLAG_TSO;
2857 }
2858
2859 /*
2860 * Check if this is a PCI-X or PCI Express device.
2861 */
2862 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2863 /*
2864 * Found a PCI Express capabilities register, this
2865 * must be a PCI Express device.
2866 */
2867 sc->bge_flags |= BGE_FLAG_PCIE;
2868 sc->bge_expcap = reg;
2869 if (pci_get_max_read_req(dev) != 4096)
2870 pci_set_max_read_req(dev, 4096);
2871 } else {
2872 /*
2873 * Check if the device is in PCI-X Mode.
2874 * (This bit is not valid on PCI Express controllers.)
2875 */
2876 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
2877 sc->bge_pcixcap = reg;
2878 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2879 BGE_PCISTATE_PCI_BUSMODE) == 0)
2880 sc->bge_flags |= BGE_FLAG_PCIX;
2881 }
2882
2883 /*
2884 * The 40bit DMA bug applies to the 5714/5715 controllers and is
2885 * not actually a MAC controller bug but an issue with the embedded
2886	 * PCIe to PCI-X bridge in the device. Use the 40bit DMA workaround.
2887 */
2888 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2889 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2890 /*
2891 * Allocate the interrupt, using MSI if possible. These devices
2892 * support 8 MSI messages, but only the first one is used in
2893 * normal operation.
2894 */
2895 rid = 0;
2896 if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
2897 sc->bge_msicap = reg;
2898 if (bge_can_use_msi(sc)) {
2899 msicount = pci_msi_count(dev);
2900 if (msicount > 1)
2901 msicount = 1;
2902 } else
2903 msicount = 0;
2904 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2905 rid = 1;
2906 sc->bge_flags |= BGE_FLAG_MSI;
2907 }
2908 }
2909
2910 /*
2911	 * All controllers except BCM5700 support tagged status, but
2912	 * we use tagged status only for the MSI case on BCM5717.
2913	 * Otherwise MSI on BCM5717 does not work.
2914 */
2915#ifndef DEVICE_POLLING
2916 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
2917 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
2918#endif
2919
2920 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2921 RF_SHAREABLE | RF_ACTIVE);
2922
2923 if (sc->bge_irq == NULL) {
2924 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2925 error = ENXIO;
2926 goto fail;
2927 }
2928
2929 device_printf(dev,
2930 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2931 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2932 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2933 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2934
2935 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2936
2937 /* Try to reset the chip. */
2938 if (bge_reset(sc)) {
2939 device_printf(sc->bge_dev, "chip reset failed\n");
2940 error = ENXIO;
2941 goto fail;
2942 }
2943
2944 sc->bge_asf_mode = 0;
2945 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2946 == BGE_MAGIC_NUMBER)) {
2947 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2948 & BGE_HWCFG_ASF) {
2949 sc->bge_asf_mode |= ASF_ENABLE;
2950 sc->bge_asf_mode |= ASF_STACKUP;
2951 if (BGE_IS_575X_PLUS(sc))
2952 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2953 }
2954 }
2955
2956 /* Try to reset the chip again the nice way. */
2957 bge_stop_fw(sc);
2958 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2959 if (bge_reset(sc)) {
2960 device_printf(sc->bge_dev, "chip reset failed\n");
2961 error = ENXIO;
2962 goto fail;
2963 }
2964
2965 bge_sig_legacy(sc, BGE_RESET_STOP);
2966 bge_sig_post_reset(sc, BGE_RESET_STOP);
2967
2968 if (bge_chipinit(sc)) {
2969 device_printf(sc->bge_dev, "chip initialization failed\n");
2970 error = ENXIO;
2971 goto fail;
2972 }
2973
2974 error = bge_get_eaddr(sc, eaddr);
2975 if (error) {
2976 device_printf(sc->bge_dev,
2977 "failed to read station address\n");
2978 error = ENXIO;
2979 goto fail;
2980 }
2981
2982 /* 5705 limits RX return ring to 512 entries. */
2983 if (BGE_IS_5717_PLUS(sc))
2984 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2985 else if (BGE_IS_5705_PLUS(sc))
2986 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2987 else
2988 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2989
2990 if (bge_dma_alloc(sc)) {
2991 device_printf(sc->bge_dev,
2992 "failed to allocate DMA resources\n");
2993 error = ENXIO;
2994 goto fail;
2995 }
2996
2997 bge_add_sysctls(sc);
2998
2999 /* Set default tuneable values. */
3000 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3001 sc->bge_rx_coal_ticks = 150;
3002 sc->bge_tx_coal_ticks = 150;
3003 sc->bge_rx_max_coal_bds = 10;
3004 sc->bge_tx_max_coal_bds = 10;
3005
3006 /* Initialize checksum features to use. */
3007 sc->bge_csum_features = BGE_CSUM_FEATURES;
3008 if (sc->bge_forced_udpcsum != 0)
3009 sc->bge_csum_features |= CSUM_UDP;
3010
3011 /* Set up ifnet structure */
3012 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3013 if (ifp == NULL) {
3014 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3015 error = ENXIO;
3016 goto fail;
3017 }
3018 ifp->if_softc = sc;
3019 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3020 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3021 ifp->if_ioctl = bge_ioctl;
3022 ifp->if_start = bge_start;
3023 ifp->if_init = bge_init;
3024 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3025 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3026 IFQ_SET_READY(&ifp->if_snd);
3027 ifp->if_hwassist = sc->bge_csum_features;
3028 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3029 IFCAP_VLAN_MTU;
3030 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3031 ifp->if_hwassist |= CSUM_TSO;
3032 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3033 }
3034#ifdef IFCAP_VLAN_HWCSUM
3035 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3036#endif
3037 ifp->if_capenable = ifp->if_capabilities;
3038#ifdef DEVICE_POLLING
3039 ifp->if_capabilities |= IFCAP_POLLING;
3040#endif
3041
3042 /*
3043 * 5700 B0 chips do not support checksumming correctly due
3044 * to hardware bugs.
3045 */
3046 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3047 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3048 ifp->if_capenable &= ~IFCAP_HWCSUM;
3049 ifp->if_hwassist = 0;
3050 }
3051
3052 /*
3053 * Figure out what sort of media we have by checking the
3054 * hardware config word in the first 32k of NIC internal memory,
3055 * or fall back to examining the EEPROM if necessary.
3056 * Note: on some BCM5700 cards, this value appears to be unset.
3057 * If that's the case, we have to rely on identifying the NIC
3058 * by its PCI subsystem ID, as we do below for the SysKonnect
3059 * SK-9D41.
3060 */
3061 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
3062 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
3063 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3064 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3065 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3066 sizeof(hwcfg))) {
3067 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3068 error = ENXIO;
3069 goto fail;
3070 }
3071 hwcfg = ntohl(hwcfg);
3072 }
3073
3074 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3075 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3076 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3077 if (BGE_IS_5714_FAMILY(sc))
3078 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3079 else
3080 sc->bge_flags |= BGE_FLAG_TBI;
3081 }
3082
3083 if (sc->bge_flags & BGE_FLAG_TBI) {
3084 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3085 bge_ifmedia_sts);
3086 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3087 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3088 0, NULL);
3089 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3090 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3091 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3092 } else {
3093 /*
3094		 * Do transceiver setup and tell the firmware the
3095		 * driver is down so we can try to get access for the
3096		 * PHY probe if ASF is running. Retry a couple of times
3097		 * if we get a conflict with the ASF firmware accessing
3098		 * the PHY.
3099 */
3100 trys = 0;
3101 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3102again:
3103 bge_asf_driver_up(sc);
3104
924 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
925 else
926 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
927 } else {
928 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
929 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
930 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
931 }
932}
933
934/*
935 * Initialize a standard receive ring descriptor.
936 */
937static int
938bge_newbuf_std(struct bge_softc *sc, int i)
939{
940 struct mbuf *m;
941 struct bge_rx_bd *r;
942 bus_dma_segment_t segs[1];
943 bus_dmamap_t map;
944 int error, nsegs;
945
946 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
947 if (m == NULL)
948 return (ENOBUFS);
949 m->m_len = m->m_pkthdr.len = MCLBYTES;
950 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
951 m_adj(m, ETHER_ALIGN);
952
953 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
954 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
955 if (error != 0) {
956 m_freem(m);
957 return (error);
958 }
959 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
960 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
961 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
962 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
963 sc->bge_cdata.bge_rx_std_dmamap[i]);
964 }
965 map = sc->bge_cdata.bge_rx_std_dmamap[i];
966 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
967 sc->bge_cdata.bge_rx_std_sparemap = map;
968 sc->bge_cdata.bge_rx_std_chain[i] = m;
969 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
970 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
971 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
972 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
973 r->bge_flags = BGE_RXBDFLAG_END;
974 r->bge_len = segs[0].ds_len;
975 r->bge_idx = i;
976
977 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
978 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
979
980 return (0);
981}
982
983/*
984 * Initialize a jumbo receive ring descriptor. This allocates
985 * a 9k mbuf cluster to use as the receive buffer.
986 */
987static int
988bge_newbuf_jumbo(struct bge_softc *sc, int i)
989{
990 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
991 bus_dmamap_t map;
992 struct bge_extrx_bd *r;
993 struct mbuf *m;
994 int error, nsegs;
995
996 MGETHDR(m, M_DONTWAIT, MT_DATA);
997 if (m == NULL)
998 return (ENOBUFS);
999
1000 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1001 if (!(m->m_flags & M_EXT)) {
1002 m_freem(m);
1003 return (ENOBUFS);
1004 }
1005 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1006 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1007 m_adj(m, ETHER_ALIGN);
1008
1009 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1010 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1011 if (error != 0) {
1012 m_freem(m);
1013 return (error);
1014 }
1015
1016	if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1017 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1018 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1019 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1020 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1021 }
1022 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1023 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1024 sc->bge_cdata.bge_rx_jumbo_sparemap;
1025 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1026 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1027 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1028 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1029 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1030 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1031
1032 /*
1033 * Fill in the extended RX buffer descriptor.
1034 */
1035 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1036 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1037 r->bge_idx = i;
1038 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1039 switch (nsegs) {
1040 case 4:
1041 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1042 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1043 r->bge_len3 = segs[3].ds_len;
1044 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
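		/* FALLTHROUGH */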
1045 case 3:
1046 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1047 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1048 r->bge_len2 = segs[2].ds_len;
1049 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
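		/* FALLTHROUGH */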
1050 case 2:
1051 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1052 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1053 r->bge_len1 = segs[1].ds_len;
1054 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
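		/* FALLTHROUGH */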
1055 case 1:
1056 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1057 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1058 r->bge_len0 = segs[0].ds_len;
1059 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1060 break;
1061 default:
1062 panic("%s: %d segments\n", __func__, nsegs);
1063 }
1064
1065 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1066 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1067
1068 return (0);
1069}
1070
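/*
 * Fill the standard receive producer ring with mbufs and pass the
 * initial producer index to the hardware.
 */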
1071static int
1072bge_init_rx_ring_std(struct bge_softc *sc)
1073{
1074 int error, i;
1075
1076 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1077 sc->bge_std = 0;
1078 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1079 if ((error = bge_newbuf_std(sc, i)) != 0)
1080 return (error);
1081 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1082 }
1083
1084 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1085 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1086
1087 sc->bge_std = 0;
1088 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1089
1090 return (0);
1091}
1092
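/*
 * Unload and free any mbufs still attached to the standard receive ring.
 */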
1093static void
1094bge_free_rx_ring_std(struct bge_softc *sc)
1095{
1096 int i;
1097
1098 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1099 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1100 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1101 sc->bge_cdata.bge_rx_std_dmamap[i],
1102 BUS_DMASYNC_POSTREAD);
1103 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1104 sc->bge_cdata.bge_rx_std_dmamap[i]);
1105 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1106 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1107 }
1108 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1109 sizeof(struct bge_rx_bd));
1110 }
1111}
1112
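/*
 * Fill the jumbo receive producer ring with mbufs, enable the ring in
 * its RCB and pass the initial producer index to the hardware.
 */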
1113static int
1114bge_init_rx_ring_jumbo(struct bge_softc *sc)
1115{
1116 struct bge_rcb *rcb;
1117 int error, i;
1118
1119 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1120 sc->bge_jumbo = 0;
1121 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1122 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1123 return (error);
1124 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1125 }
1126
1127 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1128 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1129
1130 sc->bge_jumbo = 0;
1131
1132 /* Enable the jumbo receive producer ring. */
1133 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1134 rcb->bge_maxlen_flags =
1135 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1136 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1137
1138 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1139
1140 return (0);
1141}
1142
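/*
 * Unload and free any mbufs still attached to the jumbo receive ring.
 */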
1143static void
1144bge_free_rx_ring_jumbo(struct bge_softc *sc)
1145{
1146 int i;
1147
1148 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1149 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1150 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1151 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1152 BUS_DMASYNC_POSTREAD);
1153 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1154 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1155 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1156 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1157 }
1158 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1159 sizeof(struct bge_extrx_bd));
1160 }
1161}
1162
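/*
 * Unload and free any mbufs still pending on the transmit ring.
 */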
1163static void
1164bge_free_tx_ring(struct bge_softc *sc)
1165{
1166 int i;
1167
1168 if (sc->bge_ldata.bge_tx_ring == NULL)
1169 return;
1170
1171 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1172 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1173 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1174 sc->bge_cdata.bge_tx_dmamap[i],
1175 BUS_DMASYNC_POSTWRITE);
1176 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1177 sc->bge_cdata.bge_tx_dmamap[i]);
1178 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1179 sc->bge_cdata.bge_tx_chain[i] = NULL;
1180 }
1181 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1182 sizeof(struct bge_tx_bd));
1183 }
1184}
1185
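/*
 * Reset the transmit ring bookkeeping and the host and NIC send ring
 * producer index mailboxes.
 */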
1186static int
1187bge_init_tx_ring(struct bge_softc *sc)
1188{
1189 sc->bge_txcnt = 0;
1190 sc->bge_tx_saved_considx = 0;
1191
1192 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1193 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1194 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1195
1196 /* Initialize transmit producer index for host-memory send ring. */
1197 sc->bge_tx_prodidx = 0;
1198 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1199
1200 /* 5700 b2 errata */
1201 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1202 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1203
1204 /* NIC-memory send ring not used; initialize to zero. */
1205 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1206 /* 5700 b2 errata */
1207 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1208 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1209
1210 return (0);
1211}
1212
1213static void
1214bge_setpromisc(struct bge_softc *sc)
1215{
1216 struct ifnet *ifp;
1217
1218 BGE_LOCK_ASSERT(sc);
1219
1220 ifp = sc->bge_ifp;
1221
1222 /* Enable or disable promiscuous mode as needed. */
1223 if (ifp->if_flags & IFF_PROMISC)
1224 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1225 else
1226 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1227}
1228
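/*
 * Program the multicast hash filter.  Each multicast address is hashed
 * with ether_crc32_le(); the low 7 bits of the CRC select one bit in the
 * four 32-bit BGE_MAR registers.
 */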
1229static void
1230bge_setmulti(struct bge_softc *sc)
1231{
1232 struct ifnet *ifp;
1233 struct ifmultiaddr *ifma;
1234 uint32_t hashes[4] = { 0, 0, 0, 0 };
1235 int h, i;
1236
1237 BGE_LOCK_ASSERT(sc);
1238
1239 ifp = sc->bge_ifp;
1240
1241 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1242 for (i = 0; i < 4; i++)
1243 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1244 return;
1245 }
1246
1247 /* First, zot all the existing filters. */
1248 for (i = 0; i < 4; i++)
1249 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1250
1251 /* Now program new ones. */
1252 if_maddr_rlock(ifp);
1253 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1254 if (ifma->ifma_addr->sa_family != AF_LINK)
1255 continue;
1256 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1257 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1258 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1259 }
1260 if_maddr_runlock(ifp);
1261
1262 for (i = 0; i < 4; i++)
1263 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1264}
1265
1266static void
1267bge_setvlan(struct bge_softc *sc)
1268{
1269 struct ifnet *ifp;
1270
1271 BGE_LOCK_ASSERT(sc);
1272
1273 ifp = sc->bge_ifp;
1274
1275 /* Enable or disable VLAN tag stripping as needed. */
1276 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1277 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1278 else
1279 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1280}
1281
1282static void
1283bge_sig_pre_reset(struct bge_softc *sc, int type)
1284{
1285
1286 /*
1287	 * Some chips don't like this, so only do it if ASF is enabled.
1288 */
1289 if (sc->bge_asf_mode)
1290 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1291
1292 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1293 switch (type) {
1294 case BGE_RESET_START:
1295 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1296 break;
1297 case BGE_RESET_STOP:
1298 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1299 break;
1300 }
1301 }
1302}
1303
1304static void
1305bge_sig_post_reset(struct bge_softc *sc, int type)
1306{
1307
1308 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1309 switch (type) {
1310 case BGE_RESET_START:
1311 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1312 /* START DONE */
1313 break;
1314 case BGE_RESET_STOP:
1315 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1316 break;
1317 }
1318 }
1319}
1320
1321static void
1322bge_sig_legacy(struct bge_softc *sc, int type)
1323{
1324
1325 if (sc->bge_asf_mode) {
1326 switch (type) {
1327 case BGE_RESET_START:
1328 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1329 break;
1330 case BGE_RESET_STOP:
1331 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1332 break;
1333 }
1334 }
1335}
1336
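/*
 * Ask the ASF firmware to pause and wait up to 1ms for it to acknowledge
 * by clearing the CPU event bit.
 */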
1337static void
1338bge_stop_fw(struct bge_softc *sc)
1339{
1340 int i;
1341
1342 if (sc->bge_asf_mode) {
1343 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1344 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1345 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1346
1347 for (i = 0; i < 100; i++ ) {
1348 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1349 break;
1350 DELAY(10);
1351 }
1352 }
1353}
1354
1355/*
1356 * Do endian, PCI and DMA initialization.
1357 */
1358static int
1359bge_chipinit(struct bge_softc *sc)
1360{
1361 uint32_t dma_rw_ctl, misc_ctl;
1362 uint16_t val;
1363 int i;
1364
1365 /* Set endianness before we access any non-PCI registers. */
1366 misc_ctl = BGE_INIT;
1367 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1368 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1369 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1370
1371 /* Clear the MAC control register */
1372 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1373
1374 /*
1375 * Clear the MAC statistics block in the NIC's
1376 * internal memory.
1377 */
1378 for (i = BGE_STATS_BLOCK;
1379 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1380 BGE_MEMWIN_WRITE(sc, i, 0);
1381
1382 for (i = BGE_STATUS_BLOCK;
1383 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1384 BGE_MEMWIN_WRITE(sc, i, 0);
1385
1386 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1387 /*
1388 * Fix data corruption caused by non-qword write with WB.
1389 * Fix master abort in PCI mode.
1390 * Fix PCI latency timer.
1391 */
1392 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1393 val |= (1 << 10) | (1 << 12) | (1 << 13);
1394 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1395 }
1396
1397 /*
1398 * Set up the PCI DMA control register.
1399 */
1400 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1401 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1402 if (sc->bge_flags & BGE_FLAG_PCIE) {
1403 /* Read watermark not used, 128 bytes for write. */
1404 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1405 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1406 if (BGE_IS_5714_FAMILY(sc)) {
1407 /* 256 bytes for read and write. */
1408 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1409 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1410 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1411 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1412 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1413 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1414 /*
1415 * In the BCM5703, the DMA read watermark should
1416 * be set to less than or equal to the maximum
1417 * memory read byte count of the PCI-X command
1418 * register.
1419 */
1420 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1421 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1422 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1423 /* 1536 bytes for read, 384 bytes for write. */
1424 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1425 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1426 } else {
1427 /* 384 bytes for read and write. */
1428 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1429 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1430 0x0F;
1431 }
1432 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1433 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1434 uint32_t tmp;
1435
1436 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1437 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1438 if (tmp == 6 || tmp == 7)
1439 dma_rw_ctl |=
1440 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1441
1442 /* Set PCI-X DMA write workaround. */
1443 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1444 }
1445 } else {
1446 /* Conventional PCI bus: 256 bytes for read and write. */
1447 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1448 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1449
1450 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1451 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1452 dma_rw_ctl |= 0x0F;
1453 }
1454 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1455 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1456 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1457 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1458 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1459 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1460 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1461 if (BGE_IS_5717_PLUS(sc))
1462 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1463 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1464
1465 /*
1466 * Set up general mode register.
1467 */
1468 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1469 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1470 BGE_MODECTL_TX_NO_PHDR_CSUM);
1471
1472 /*
1473	 * BCM5701 B5 has a bug causing data corruption when using
1474 * 64-bit DMA reads, which can be terminated early and then
1475 * completed later as 32-bit accesses, in combination with
1476 * certain bridges.
1477 */
1478 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1479 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1480 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1481
1482 /*
1483 * Tell the firmware the driver is running
1484 */
1485 if (sc->bge_asf_mode & ASF_STACKUP)
1486 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1487
1488 /*
1489 * Disable memory write invalidate. Apparently it is not supported
1490 * properly by these devices. Also ensure that INTx isn't disabled,
1491 * as these chips need it even when using MSI.
1492 */
1493 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1494 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1495
1496	/* Set the timer prescaler (always 66MHz) */
1497 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1498
1499 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1500 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1501 DELAY(40); /* XXX */
1502
1503 /* Put PHY into ready state */
1504 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1505 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1506 DELAY(40);
1507 }
1508
1509 return (0);
1510}
1511
1512static int
1513bge_blockinit(struct bge_softc *sc)
1514{
1515 struct bge_rcb *rcb;
1516 bus_size_t vrcb;
1517 bge_hostaddr taddr;
1518 uint32_t val;
1519 int i, limit;
1520
1521 /*
1522 * Initialize the memory window pointer register so that
1523 * we can access the first 32K of internal NIC RAM. This will
1524 * allow us to set up the TX send ring RCBs and the RX return
1525 * ring RCBs, plus other things which live in NIC memory.
1526 */
1527 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1528
1529 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1530
1531 if (!(BGE_IS_5705_PLUS(sc))) {
1532 /* Configure mbuf memory pool */
1533 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1534 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1535 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1536 else
1537 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1538
1539 /* Configure DMA resource pool */
1540 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1541 BGE_DMA_DESCRIPTORS);
1542 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1543 }
1544
1545 /* Configure mbuf pool watermarks */
1546 if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
1547 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1548 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1549 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1550 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1551 } else {
1552 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1553 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1554 }
1555 } else if (!BGE_IS_5705_PLUS(sc)) {
1556 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1557 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1558 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1559 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1560 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1561 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1562 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1563 } else {
1564 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1565 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1566 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1567 }
1568
1569 /* Configure DMA resource watermarks */
1570 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1571 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1572
1573 /* Enable buffer manager */
1574 if (!(BGE_IS_5705_PLUS(sc))) {
1575 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1576 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1577
1578 /* Poll for buffer manager start indication */
1579 for (i = 0; i < BGE_TIMEOUT; i++) {
1580 DELAY(10);
1581 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1582 break;
1583 }
1584
1585 if (i == BGE_TIMEOUT) {
1586 device_printf(sc->bge_dev,
1587 "buffer manager failed to start\n");
1588 return (ENXIO);
1589 }
1590 }
1591
1592 /* Enable flow-through queues */
1593 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1594 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1595
1596 /* Wait until queue initialization is complete */
1597 for (i = 0; i < BGE_TIMEOUT; i++) {
1598 DELAY(10);
1599 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1600 break;
1601 }
1602
1603 if (i == BGE_TIMEOUT) {
1604 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1605 return (ENXIO);
1606 }
1607
1608 /*
1609 * Summary of rings supported by the controller:
1610 *
1611 * Standard Receive Producer Ring
1612 * - This ring is used to feed receive buffers for "standard"
1613 * sized frames (typically 1536 bytes) to the controller.
1614 *
1615 * Jumbo Receive Producer Ring
1616 * - This ring is used to feed receive buffers for jumbo sized
1617 * frames (i.e. anything bigger than the "standard" frames)
1618 * to the controller.
1619 *
1620 * Mini Receive Producer Ring
1621 * - This ring is used to feed receive buffers for "mini"
1622 * sized frames to the controller.
1623 * - This feature required external memory for the controller
1624 * but was never used in a production system. Should always
1625 * be disabled.
1626 *
1627 * Receive Return Ring
1628 * - After the controller has placed an incoming frame into a
1629	 *   receive buffer, that buffer is moved into a receive return
1630	 *   ring. The driver is then responsible for passing the
1631 * buffer up to the stack. Many versions of the controller
1632 * support multiple RR rings.
1633 *
1634 * Send Ring
1635 * - This ring is used for outgoing frames. Many versions of
1636 * the controller support multiple send rings.
1637 */
1638
1639 /* Initialize the standard receive producer ring control block. */
1640 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1641 rcb->bge_hostaddr.bge_addr_lo =
1642 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1643 rcb->bge_hostaddr.bge_addr_hi =
1644 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1645 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1646 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1647 if (BGE_IS_5717_PLUS(sc)) {
1648 /*
1649 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1650 * Bits 15-2 : Maximum RX frame size
1651		 * Bit 1	: 1 = Ring Disabled, 0 = Ring Enabled
1652 * Bit 0 : Reserved
1653 */
1654 rcb->bge_maxlen_flags =
1655 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1656 } else if (BGE_IS_5705_PLUS(sc)) {
1657 /*
1658 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1659 * Bits 15-2 : Reserved (should be 0)
1660 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1661 * Bit 0 : Reserved
1662 */
1663 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1664 } else {
1665 /*
1666 * Ring size is always XXX entries
1667 * Bits 31-16: Maximum RX frame size
1668 * Bits 15-2 : Reserved (should be 0)
1669 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1670 * Bit 0 : Reserved
1671 */
1672 rcb->bge_maxlen_flags =
1673 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1674 }
1675 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1676 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1677 else
1678 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1679 /* Write the standard receive producer ring control block. */
1680 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1681 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1682 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1683 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1684
1685 /* Reset the standard receive producer ring producer index. */
1686 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1687
1688 /*
1689 * Initialize the jumbo RX producer ring control
1690 * block. We set the 'ring disabled' bit in the
1691 * flags field until we're actually ready to start
1692 * using this ring (i.e. once we set the MTU
1693 * high enough to require it).
1694 */
1695 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1696 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1697 /* Get the jumbo receive producer ring RCB parameters. */
1698 rcb->bge_hostaddr.bge_addr_lo =
1699 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1700 rcb->bge_hostaddr.bge_addr_hi =
1701 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1702 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1703 sc->bge_cdata.bge_rx_jumbo_ring_map,
1704 BUS_DMASYNC_PREREAD);
1705 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1706 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1707 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1708 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1709 else
1710 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1711 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1712 rcb->bge_hostaddr.bge_addr_hi);
1713 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1714 rcb->bge_hostaddr.bge_addr_lo);
1715 /* Program the jumbo receive producer ring RCB parameters. */
1716 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1717 rcb->bge_maxlen_flags);
1718 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1719 /* Reset the jumbo receive producer ring producer index. */
1720 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1721 }
1722
1723 /* Disable the mini receive producer ring RCB. */
1724 if (BGE_IS_5700_FAMILY(sc)) {
1725 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1726 rcb->bge_maxlen_flags =
1727 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1728 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1729 rcb->bge_maxlen_flags);
1730 /* Reset the mini receive producer ring producer index. */
1731 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1732 }
1733
1734 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1735 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1736 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1737 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1738 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1739 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1740 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1741 }
1742 /*
1743 * The BD ring replenish thresholds control how often the
1744	 * hardware fetches new BDs from the producer rings in host
1745	 * memory. Setting the value too low on a busy system can
1746	 * starve the hardware and reduce the throughput.
1747	 *
1748	 * Set the BD ring replenish thresholds. The recommended
1749 * values are 1/8th the number of descriptors allocated to
1750 * each ring.
1751 * XXX The 5754 requires a lower threshold, so it might be a
1752 * requirement of all 575x family chips. The Linux driver sets
1753 * the lower threshold for all 5705 family chips as well, but there
1754 * are reports that it might not need to be so strict.
1755 *
1756 * XXX Linux does some extra fiddling here for the 5906 parts as
1757 * well.
1758 */
1759 if (BGE_IS_5705_PLUS(sc))
1760 val = 8;
1761 else
1762 val = BGE_STD_RX_RING_CNT / 8;
1763 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1764 if (BGE_IS_JUMBO_CAPABLE(sc))
1765 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1766 BGE_JUMBO_RX_RING_CNT/8);
1767 if (BGE_IS_5717_PLUS(sc)) {
1768 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1769 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1770 }
1771
1772 /*
1773 * Disable all send rings by setting the 'ring disabled' bit
1774 * in the flags field of all the TX send ring control blocks,
1775 * located in NIC memory.
1776 */
1777 if (!BGE_IS_5705_PLUS(sc))
1778 /* 5700 to 5704 had 16 send rings. */
1779 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1780 else
1781 limit = 1;
1782 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1783 for (i = 0; i < limit; i++) {
1784 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1785 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1786 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1787 vrcb += sizeof(struct bge_rcb);
1788 }
1789
1790 /* Configure send ring RCB 0 (we use only the first ring) */
1791 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1792 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1793 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1794 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1795 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1796 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1797 else
1798 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1799 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1800 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1801 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1802
1803 /*
1804 * Disable all receive return rings by setting the
1805	 * 'ring disabled' bit in the flags field of all the receive
1806 * return ring control blocks, located in NIC memory.
1807 */
1808 if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
1809 /* Should be 17, use 16 until we get an SRAM map. */
1810 limit = 16;
1811 } else if (!BGE_IS_5705_PLUS(sc))
1812 limit = BGE_RX_RINGS_MAX;
1813 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
1814 limit = 4;
1815 else
1816 limit = 1;
1817 /* Disable all receive return rings. */
1818 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1819 for (i = 0; i < limit; i++) {
1820 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1821 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1822 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1823 BGE_RCB_FLAG_RING_DISABLED);
1824 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1825 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1826 (i * (sizeof(uint64_t))), 0);
1827 vrcb += sizeof(struct bge_rcb);
1828 }
1829
1830 /*
1831 * Set up receive return ring 0. Note that the NIC address
1832 * for RX return rings is 0x0. The return rings live entirely
1833 * within the host, so the nicaddr field in the RCB isn't used.
1834 */
1835 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1836 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1837 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1838 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1839 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1840 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1841 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1842
1843 /* Set random backoff seed for TX */
1844 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1845 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1846 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1847 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1848 BGE_TX_BACKOFF_SEED_MASK);
1849
1850 /* Set inter-packet gap */
1851 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1852
1853 /*
1854 * Specify which ring to use for packets that don't match
1855 * any RX rules.
1856 */
1857 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1858
1859 /*
1860 * Configure number of RX lists. One interrupt distribution
1861 * list, sixteen active lists, one bad frames class.
1862 */
1863 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1864
1865	/* Initialize RX list placement stats mask. */
1866 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1867 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1868
1869 /* Disable host coalescing until we get it set up */
1870 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1871
1872 /* Poll to make sure it's shut down. */
1873 for (i = 0; i < BGE_TIMEOUT; i++) {
1874 DELAY(10);
1875 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1876 break;
1877 }
1878
1879 if (i == BGE_TIMEOUT) {
1880 device_printf(sc->bge_dev,
1881 "host coalescing engine failed to idle\n");
1882 return (ENXIO);
1883 }
1884
1885 /* Set up host coalescing defaults */
1886 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1887 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1888 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1889 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1890 if (!(BGE_IS_5705_PLUS(sc))) {
1891 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1892 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1893 }
1894 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1895 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1896
1897 /* Set up address of statistics block */
1898 if (!(BGE_IS_5705_PLUS(sc))) {
1899 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1900 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1901 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1902 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1903 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1904 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1905 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1906 }
1907
1908 /* Set up address of status block */
1909 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1910 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1911 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1912 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1913
1914 /* Set up status block size. */
1915 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1916 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1917 val = BGE_STATBLKSZ_FULL;
1918 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1919 } else {
1920 val = BGE_STATBLKSZ_32BYTE;
1921 bzero(sc->bge_ldata.bge_status_block, 32);
1922 }
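/*
 * Note: a 32-byte status block is sufficient here because only a
 * single RX return ring is configured; BCM5700 AX/BX parts appear
 * to require the full-sized block (the matching size choice is
 * made when the block is allocated in bge_dma_alloc()).
 */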
1923 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1924 sc->bge_cdata.bge_status_map,
1925 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1926
1927 /* Turn on host coalescing state machine */
1928 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1929
1930 /* Turn on RX BD completion state machine and enable attentions */
1931 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1932 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1933
1934 /* Turn on RX list placement state machine */
1935 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1936
1937 /* Turn on RX list selector state machine. */
1938 if (!(BGE_IS_5705_PLUS(sc)))
1939 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1940
1941 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1942 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1943 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1944 BGE_MACMODE_FRMHDR_DMA_ENB;
1945
1946 if (sc->bge_flags & BGE_FLAG_TBI)
1947 val |= BGE_PORTMODE_TBI;
1948 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1949 val |= BGE_PORTMODE_GMII;
1950 else
1951 val |= BGE_PORTMODE_MII;
1952
1953 /* Turn on DMA, clear stats */
1954 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1955
1956 /* Set misc. local control, enable interrupts on attentions */
1957 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1958
1959#ifdef notdef
1960 /* Assert GPIO pins for PHY reset */
1961 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1962 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1963 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1964 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1965#endif
1966
1967 /* Turn on DMA completion state machine */
1968 if (!(BGE_IS_5705_PLUS(sc)))
1969 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1970
1971 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1972
1973 /* Enable host coalescing bug fix. */
1974 if (BGE_IS_5755_PLUS(sc))
1975 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1976
1977 /* Request larger DMA burst size to get better performance. */
1978 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
1979 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1980
1981 /* Turn on write DMA state machine */
1982 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1983 DELAY(40);
1984
1985 /* Turn on read DMA state machine */
1986 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1987
1988 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1989 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
1990
1991 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1992 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1993 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1994 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1995 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1996 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1997 if (sc->bge_flags & BGE_FLAG_PCIE)
1998 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1999 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2000 val |= BGE_RDMAMODE_TSO4_ENABLE;
2001 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2002 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2003 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2004 val |= BGE_RDMAMODE_TSO6_ENABLE;
2005 }
2006 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2007 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2008 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2009 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2010 BGE_IS_5717_PLUS(sc)) {
2011 /*
2012 * Enable fix for read DMA FIFO overruns.
2013 * The fix is to limit the number of RX BDs
2014 * the hardware would fetch at a time.
2015 */
2016 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
2017 CSR_READ_4(sc, BGE_RDMA_RSRVCTRL) |
2018 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2019 }
2020 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2021 DELAY(40);
2022
2023 /* Turn on RX data completion state machine */
2024 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2025
2026 /* Turn on RX BD initiator state machine */
2027 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2028
2029 /* Turn on RX data and RX BD initiator state machine */
2030 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2031
2032 /* Turn on Mbuf cluster free state machine */
2033 if (!(BGE_IS_5705_PLUS(sc)))
2034 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2035
2036 /* Turn on send BD completion state machine */
2037 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2038
2039 /* Turn on send data completion state machine */
2040 val = BGE_SDCMODE_ENABLE;
2041 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2042 val |= BGE_SDCMODE_CDELAY;
2043 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2044
2045 /* Turn on send data initiator state machine */
2046 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2047 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2048 BGE_SDIMODE_HW_LSO_PRE_DMA);
2049 else
2050 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2051
2052 /* Turn on send BD initiator state machine */
2053 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2054
2055 /* Turn on send BD selector state machine */
2056 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2057
2058 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2059 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2060 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2061
2062 /* ack/clear link change events */
2063 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2064 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2065 BGE_MACSTAT_LINK_CHANGED);
2066 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2067
2068 /*
2069 * Enable attention when the link has changed state for
2070 * devices that use auto polling.
2071 */
2072 if (sc->bge_flags & BGE_FLAG_TBI) {
2073 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2074 } else {
2075 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2076 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2077 DELAY(80);
2078 }
2079 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2080 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2081 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2082 BGE_EVTENB_MI_INTERRUPT);
2083 }
2084
2085 /*
2086 * Clear any pending link state attention.
2087 * Otherwise some link state change events may be lost until attention
2088 * is cleared by bge_intr() -> bge_link_upd() sequence.
2089 * It's not necessary on newer BCM chips - perhaps enabling link
2090 * state change attentions implies clearing pending attention.
2091 */
2092 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2093 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2094 BGE_MACSTAT_LINK_CHANGED);
2095
2096 /* Enable link state change attentions. */
2097 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2098
2099 return (0);
2100}
2101
2102const struct bge_revision *
2103bge_lookup_rev(uint32_t chipid)
2104{
2105 const struct bge_revision *br;
2106
2107 for (br = bge_revisions; br->br_name != NULL; br++) {
2108 if (br->br_chipid == chipid)
2109 return (br);
2110 }
2111
2112 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2113 if (br->br_chipid == BGE_ASICREV(chipid))
2114 return (br);
2115 }
2116
2117 return (NULL);
2118}
2119
2120const struct bge_vendor *
2121bge_lookup_vendor(uint16_t vid)
2122{
2123 const struct bge_vendor *v;
2124
2125 for (v = bge_vendors; v->v_name != NULL; v++)
2126 if (v->v_id == vid)
2127 return (v);
2128
2129 panic("%s: unknown vendor %d", __func__, vid);
2130 return (NULL);
2131}
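/*
 * Note: the panic above should be unreachable in practice, since
 * bge_lookup_vendor() is only called with a vendor ID that already
 * matched an entry in bge_devs; reaching it would mean bge_devs and
 * bge_vendors are out of sync.
 */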
2132
2133/*
2134 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2135 * against our list and return its name if we find a match.
2136 *
2137 * Note that since the Broadcom controller contains VPD support, we
2138 * try to get the device name string from the controller itself instead
2139 * of the compiled-in string. It guarantees we'll always announce the
2140 * right product name. We fall back to the compiled-in string when
2141 * VPD is unavailable or corrupt.
2142 */
2143static int
2144bge_probe(device_t dev)
2145{
2146 const struct bge_type *t = bge_devs;
2147 struct bge_softc *sc = device_get_softc(dev);
2148 uint16_t vid, did;
2149
2150 sc->bge_dev = dev;
2151 vid = pci_get_vendor(dev);
2152 did = pci_get_device(dev);
2153 while(t->bge_vid != 0) {
2154 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2155 char model[64], buf[96];
2156 const struct bge_revision *br;
2157 const struct bge_vendor *v;
2158 uint32_t id;
2159
2160 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2161 BGE_PCIMISCCTL_ASICREV_SHIFT;
2162 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2163 /*
2164 * Find the ASIC revision. Different chips
2165 * use different registers.
2166 */
2167 switch (pci_get_device(dev)) {
2168 case BCOM_DEVICEID_BCM5717:
2169 case BCOM_DEVICEID_BCM5718:
2170 id = pci_read_config(dev,
2171 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2172 break;
2173 default:
2174 id = pci_read_config(dev,
2175 BGE_PCI_PRODID_ASICREV, 4);
2176 }
2177 }
2178 br = bge_lookup_rev(id);
2179 v = bge_lookup_vendor(vid);
2180 {
2181#if __FreeBSD_version > 700024
2182 const char *pname;
2183
2184 if (bge_has_eaddr(sc) &&
2185 pci_get_vpd_ident(dev, &pname) == 0)
2186 snprintf(model, 64, "%s", pname);
2187 else
2188#endif
2189 snprintf(model, 64, "%s %s",
2190 v->v_name,
2191 br != NULL ? br->br_name :
2192 "NetXtreme Ethernet Controller");
2193 }
2194 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2195 br != NULL ? "" : "unknown ", id);
2196 device_set_desc_copy(dev, buf);
2197 return (0);
2198 }
2199 t++;
2200 }
2201
2202 return (ENXIO);
2203}
2204
2205static void
2206bge_dma_free(struct bge_softc *sc)
2207{
2208 int i;
2209
2210 /* Destroy DMA maps for RX buffers. */
2211 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2212 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2213 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2214 sc->bge_cdata.bge_rx_std_dmamap[i]);
2215 }
2216 if (sc->bge_cdata.bge_rx_std_sparemap)
2217 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2218 sc->bge_cdata.bge_rx_std_sparemap);
2219
2220 /* Destroy DMA maps for jumbo RX buffers. */
2221 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2222 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2223 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2224 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2225 }
2226 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2227 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2228 sc->bge_cdata.bge_rx_jumbo_sparemap);
2229
2230 /* Destroy DMA maps for TX buffers. */
2231 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2232 if (sc->bge_cdata.bge_tx_dmamap[i])
2233 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2234 sc->bge_cdata.bge_tx_dmamap[i]);
2235 }
2236
2237 if (sc->bge_cdata.bge_rx_mtag)
2238 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2239 if (sc->bge_cdata.bge_tx_mtag)
2240 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2241
2242
2243 /* Destroy standard RX ring. */
2244 if (sc->bge_cdata.bge_rx_std_ring_map)
2245 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2246 sc->bge_cdata.bge_rx_std_ring_map);
2247 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2248 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2249 sc->bge_ldata.bge_rx_std_ring,
2250 sc->bge_cdata.bge_rx_std_ring_map);
2251
2252 if (sc->bge_cdata.bge_rx_std_ring_tag)
2253 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2254
2255 /* Destroy jumbo RX ring. */
2256 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2257 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2258 sc->bge_cdata.bge_rx_jumbo_ring_map);
2259
2260 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2261 sc->bge_ldata.bge_rx_jumbo_ring)
2262 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2263 sc->bge_ldata.bge_rx_jumbo_ring,
2264 sc->bge_cdata.bge_rx_jumbo_ring_map);
2265
2266 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2267 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2268
2269 /* Destroy RX return ring. */
2270 if (sc->bge_cdata.bge_rx_return_ring_map)
2271 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2272 sc->bge_cdata.bge_rx_return_ring_map);
2273
2274 if (sc->bge_cdata.bge_rx_return_ring_map &&
2275 sc->bge_ldata.bge_rx_return_ring)
2276 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2277 sc->bge_ldata.bge_rx_return_ring,
2278 sc->bge_cdata.bge_rx_return_ring_map);
2279
2280 if (sc->bge_cdata.bge_rx_return_ring_tag)
2281 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2282
2283 /* Destroy TX ring. */
2284 if (sc->bge_cdata.bge_tx_ring_map)
2285 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2286 sc->bge_cdata.bge_tx_ring_map);
2287
2288 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2289 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2290 sc->bge_ldata.bge_tx_ring,
2291 sc->bge_cdata.bge_tx_ring_map);
2292
2293 if (sc->bge_cdata.bge_tx_ring_tag)
2294 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2295
2296 /* Destroy status block. */
2297 if (sc->bge_cdata.bge_status_map)
2298 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2299 sc->bge_cdata.bge_status_map);
2300
2301 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2302 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2303 sc->bge_ldata.bge_status_block,
2304 sc->bge_cdata.bge_status_map);
2305
2306 if (sc->bge_cdata.bge_status_tag)
2307 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2308
2309 /* Destroy statistics block. */
2310 if (sc->bge_cdata.bge_stats_map)
2311 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2312 sc->bge_cdata.bge_stats_map);
2313
2314 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2315 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2316 sc->bge_ldata.bge_stats,
2317 sc->bge_cdata.bge_stats_map);
2318
2319 if (sc->bge_cdata.bge_stats_tag)
2320 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2321
2322 if (sc->bge_cdata.bge_buffer_tag)
2323 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2324
2325 /* Destroy the parent tag. */
2326 if (sc->bge_cdata.bge_parent_tag)
2327 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2328}
2329
2330static int
2331bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2332 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2333 bus_addr_t *paddr, const char *msg)
2334{
2335 struct bge_dmamap_arg ctx;
2336 bus_addr_t lowaddr;
2337 bus_size_t ring_end;
2338 int error;
2339
2340 lowaddr = BUS_SPACE_MAXADDR;
2341again:
2342 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2343 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2344 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2345 if (error != 0) {
2346 device_printf(sc->bge_dev,
2347 "could not create %s dma tag\n", msg);
2348 return (ENOMEM);
2349 }
2350 /* Allocate DMA'able memory for ring. */
2351 error = bus_dmamem_alloc(*tag, (void **)ring,
2352 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2353 if (error != 0) {
2354 device_printf(sc->bge_dev,
2355 "could not allocate DMA'able memory for %s\n", msg);
2356 return (ENOMEM);
2357 }
2358 /* Load the address of the ring. */
2359 ctx.bge_busaddr = 0;
2360 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2361 &ctx, BUS_DMA_NOWAIT);
2362 if (error != 0) {
2363 device_printf(sc->bge_dev,
2364 "could not load DMA'able memory for %s\n", msg);
2365 return (ENOMEM);
2366 }
2367 *paddr = ctx.bge_busaddr;
2368 ring_end = *paddr + maxsize;
2369 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2370 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2371 /*
2372 * 4GB boundary crossed. Limit maximum allowable DMA
2373 * address space to 32bit and try again.
2374 */
2375 bus_dmamap_unload(*tag, *map);
2376 bus_dmamem_free(*tag, *ring, *map);
2377 bus_dma_tag_destroy(*tag);
2378 if (bootverbose)
2379 device_printf(sc->bge_dev, "4GB boundary crossed, "
2380 "limit DMA address space to 32bit for %s\n", msg);
2381 *ring = NULL;
2382 *tag = NULL;
2383 *map = NULL;
2384 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2385 goto again;
2386 }
2387 return (0);
2388}
2389
2390static int
2391bge_dma_alloc(struct bge_softc *sc)
2392{
2393 bus_addr_t lowaddr;
2394 bus_size_t boundary, sbsz, txsegsz, txmaxsegsz;
2395 int i, error;
2396
2397 lowaddr = BUS_SPACE_MAXADDR;
2398 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2399 lowaddr = BGE_DMA_MAXADDR;
2400 /*
2401 * Allocate the parent bus DMA tag appropriate for PCI.
2402 */
2403 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2404 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2405 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2406 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2407 if (error != 0) {
2408 device_printf(sc->bge_dev,
2409 "could not allocate parent dma tag\n");
2410 return (ENOMEM);
2411 }
2412
2413 /* Create tag for standard RX ring. */
2414 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2415 &sc->bge_cdata.bge_rx_std_ring_tag,
2416 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2417 &sc->bge_cdata.bge_rx_std_ring_map,
2418 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2419 if (error)
2420 return (error);
2421
2422 /* Create tag for RX return ring. */
2423 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2424 &sc->bge_cdata.bge_rx_return_ring_tag,
2425 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2426 &sc->bge_cdata.bge_rx_return_ring_map,
2427 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2428 if (error)
2429 return (error);
2430
2431 /* Create tag for TX ring. */
2432 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2433 &sc->bge_cdata.bge_tx_ring_tag,
2434 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2435 &sc->bge_cdata.bge_tx_ring_map,
2436 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2437 if (error)
2438 return (error);
2439
2440 /*
2441 * Create tag for status block.
2442 * Because we use only a single Tx/Rx/Rx return ring, use the
2443 * minimum status block size, except on BCM5700 AX/BX, which
2444 * seems to want to see the full status block size regardless
2445 * of the configured number of rings.
2446 */
2447 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2448 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2449 sbsz = BGE_STATUS_BLK_SZ;
2450 else
2451 sbsz = 32;
2452 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2453 &sc->bge_cdata.bge_status_tag,
2454 (uint8_t **)&sc->bge_ldata.bge_status_block,
2455 &sc->bge_cdata.bge_status_map,
2456 &sc->bge_ldata.bge_status_block_paddr, "status block");
2457 if (error)
2458 return (error);
2459
2460 /* Create tag for statistics block. */
2461 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2462 &sc->bge_cdata.bge_stats_tag,
2463 (uint8_t **)&sc->bge_ldata.bge_stats,
2464 &sc->bge_cdata.bge_stats_map,
2465 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2466 if (error)
2467 return (error);
2468
2469 /* Create tag for jumbo RX ring. */
2470 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2471 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2472 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2473 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2474 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2475 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2476 if (error)
2477 return (error);
2478 }
2479
2480 /* Create parent tag for buffers. */
2481 boundary = 0;
2482 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0)
2483 boundary = BGE_DMA_BNDRY;
2484 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2485 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2486 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2487 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2488 if (error != 0) {
2489 device_printf(sc->bge_dev,
2490 "could not allocate buffer dma tag\n");
2491 return (ENOMEM);
2492 }
2493 /* Create tag for Tx mbufs. */
2494 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2495 txsegsz = BGE_TSOSEG_SZ;
2496 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2497 } else {
2498 txsegsz = MCLBYTES;
2499 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2500 }
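/*
 * Note: 65535 corresponds to the largest possible IP datagram
 * (IP_MAXPACKET); the extra ether_vlan_header bytes leave room
 * for the largest Ethernet/VLAN header on a TSO frame handed
 * down by the stack.
 */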
2501 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2502 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2503 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2504 &sc->bge_cdata.bge_tx_mtag);
2505
2506 if (error) {
2507 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2508 return (ENOMEM);
2509 }
2510
2511 /* Create tag for Rx mbufs. */
2512 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2513 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
2514 MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2515
2516 if (error) {
2517 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2518 return (ENOMEM);
2519 }
2520
2521 /* Create DMA maps for RX buffers. */
2522 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2523 &sc->bge_cdata.bge_rx_std_sparemap);
2524 if (error) {
2525 device_printf(sc->bge_dev,
2526 "can't create spare DMA map for RX\n");
2527 return (ENOMEM);
2528 }
2529 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2530 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2531 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2532 if (error) {
2533 device_printf(sc->bge_dev,
2534 "can't create DMA map for RX\n");
2535 return (ENOMEM);
2536 }
2537 }
2538
2539 /* Create DMA maps for TX buffers. */
2540 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2541 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2542 &sc->bge_cdata.bge_tx_dmamap[i]);
2543 if (error) {
2544 device_printf(sc->bge_dev,
2545 "can't create DMA map for TX\n");
2546 return (ENOMEM);
2547 }
2548 }
2549
2550 /* Create tags for jumbo RX buffers. */
2551 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2552 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2553 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2554 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2555 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2556 if (error) {
2557 device_printf(sc->bge_dev,
2558 "could not allocate jumbo dma tag\n");
2559 return (ENOMEM);
2560 }
2561 /* Create DMA maps for jumbo RX buffers. */
2562 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2563 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2564 if (error) {
2565 device_printf(sc->bge_dev,
2566 "can't create spare DMA map for jumbo RX\n");
2567 return (ENOMEM);
2568 }
2569 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2570 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2571 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2572 if (error) {
2573 device_printf(sc->bge_dev,
2574 "can't create DMA map for jumbo RX\n");
2575 return (ENOMEM);
2576 }
2577 }
2578 }
2579
2580 return (0);
2581}
2582
2583/*
2584 * Return true if this device has more than one port.
2585 */
2586static int
2587bge_has_multiple_ports(struct bge_softc *sc)
2588{
2589 device_t dev = sc->bge_dev;
2590 u_int b, d, f, fscan, s;
2591
2592 d = pci_get_domain(dev);
2593 b = pci_get_bus(dev);
2594 s = pci_get_slot(dev);
2595 f = pci_get_function(dev);
2596 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2597 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2598 return (1);
2599 return (0);
2600}
2601
2602/*
2603 * Return true if MSI can be used with this device.
2604 */
2605static int
2606bge_can_use_msi(struct bge_softc *sc)
2607{
2608 int can_use_msi = 0;
2609
2610 /* Disable MSI for polling(4). */
2611#ifdef DEVICE_POLLING
2612 return (0);
2613#endif
2614 switch (sc->bge_asicrev) {
2615 case BGE_ASICREV_BCM5714_A0:
2616 case BGE_ASICREV_BCM5714:
2617 /*
2618 * Apparently, MSI doesn't work when these chips are
2619 * configured in single-port mode.
2620 */
2621 if (bge_has_multiple_ports(sc))
2622 can_use_msi = 1;
2623 break;
2624 case BGE_ASICREV_BCM5750:
2625 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2626 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2627 can_use_msi = 1;
2628 break;
2629 default:
2630 if (BGE_IS_575X_PLUS(sc))
2631 can_use_msi = 1;
2632 }
2633 return (can_use_msi);
2634}
2635
2636static int
2637bge_attach(device_t dev)
2638{
2639 struct ifnet *ifp;
2640 struct bge_softc *sc;
2641 uint32_t hwcfg = 0, misccfg;
2642 u_char eaddr[ETHER_ADDR_LEN];
2643 int error, f, msicount, phy_addr, reg, rid, trys;
2644
2645 sc = device_get_softc(dev);
2646 sc->bge_dev = dev;
2647
2648 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2649
2650 /*
2651 * Map control/status registers.
2652 */
2653 pci_enable_busmaster(dev);
2654
2655 rid = PCIR_BAR(0);
2656 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2657 RF_ACTIVE);
2658
2659 if (sc->bge_res == NULL) {
2660 device_printf (sc->bge_dev, "couldn't map memory\n");
2661 error = ENXIO;
2662 goto fail;
2663 }
2664
2665 /* Save various chip information. */
2666 sc->bge_chipid =
2667 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2668 BGE_PCIMISCCTL_ASICREV_SHIFT;
2669 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2670 /*
2671 * Find the ASIC revision. Different chips use different
2672 * registers.
2673 */
2674 switch (pci_get_device(dev)) {
2675 case BCOM_DEVICEID_BCM5717:
2676 case BCOM_DEVICEID_BCM5718:
2677 sc->bge_chipid = pci_read_config(dev,
2678 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2679 break;
2680 default:
2681 sc->bge_chipid = pci_read_config(dev,
2682 BGE_PCI_PRODID_ASICREV, 4);
2683 }
2684 }
2685 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2686 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2687
2688 /* Set default PHY address. */
2689 phy_addr = 1;
2690 /*
2691 * PHY address mapping for various devices.
2692 *
2693 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2694 * ---------+-------+-------+-------+-------+
2695 * BCM57XX | 1 | X | X | X |
2696 * BCM5704 | 1 | X | 1 | X |
2697 * BCM5717 | 1 | 8 | 2 | 9 |
2698 *
2699 * Other addresses may respond but they are not
2700 * IEEE compliant PHYs and should be ignored.
2701 */
2702 if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
2703 f = pci_get_function(dev);
2704 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2705 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2706 BGE_SGDIGSTS_IS_SERDES)
2707 phy_addr = f + 8;
2708 else
2709 phy_addr = f + 1;
2710 } else if (sc->bge_chipid == BGE_CHIPID_BCM5717_B0) {
2711 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2712 BGE_CPMU_PHY_STRAP_IS_SERDES)
2713 phy_addr = f + 8;
2714 else
2715 phy_addr = f + 1;
2716 }
2717 }
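/*
 * Worked example of the mapping above: on a BCM5717, PCI function 1
 * with a SerDes (fiber) interface uses PHY address 1 + 8 = 9, while
 * the copper variant uses 1 + 1 = 2.
 */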
2718
2719 /*
2720 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2721 * 5705 A0 and A1 chips.
2722 */
2723 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2724 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2725 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2726 sc->bge_chipid != BGE_CHIPID_BCM5705_A1 &&
2727 !BGE_IS_5717_PLUS(sc))
2728 sc->bge_phy_flags |= BGE_PHY_WIRESPEED;
2729
2730 if (bge_has_eaddr(sc))
2731 sc->bge_flags |= BGE_FLAG_EADDR;
2732
2733 /* Save chipset family. */
2734 switch (sc->bge_asicrev) {
2735 case BGE_ASICREV_BCM5717:
2736 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2737 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2738 BGE_FLAG_SHORT_DMA_BUG | BGE_FLAG_JUMBO_FRAME;
2739 break;
2740 case BGE_ASICREV_BCM5755:
2741 case BGE_ASICREV_BCM5761:
2742 case BGE_ASICREV_BCM5784:
2743 case BGE_ASICREV_BCM5785:
2744 case BGE_ASICREV_BCM5787:
2745 case BGE_ASICREV_BCM57780:
2746 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2747 BGE_FLAG_5705_PLUS;
2748 break;
2749 case BGE_ASICREV_BCM5700:
2750 case BGE_ASICREV_BCM5701:
2751 case BGE_ASICREV_BCM5703:
2752 case BGE_ASICREV_BCM5704:
2753 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2754 break;
2755 case BGE_ASICREV_BCM5714_A0:
2756 case BGE_ASICREV_BCM5780:
2757 case BGE_ASICREV_BCM5714:
2758 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2759 /* FALLTHROUGH */
2760 case BGE_ASICREV_BCM5750:
2761 case BGE_ASICREV_BCM5752:
2762 case BGE_ASICREV_BCM5906:
2763 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2764 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
2765 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2766 /* FALLTHROUGH */
2767 case BGE_ASICREV_BCM5705:
2768 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2769 break;
2770 }
2771
2772 /* Set various PHY bug flags. */
2773 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2774 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2775 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2776 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2777 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2778 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2779 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2780 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2781 if (pci_get_subvendor(dev) == DELL_VENDORID)
2782 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2783 if ((BGE_IS_5705_PLUS(sc)) &&
2784 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2785 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
2786 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2787 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2788 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2789 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2790 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2791 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2792 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2793 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2794 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2795 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2796 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2797 } else
2798 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2799 }
2800
2801 /* Identify the chips that use a CPMU. */
2802 if (BGE_IS_5717_PLUS(sc) ||
2803 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2804 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2805 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2806 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2807 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2808 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2809 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2810 else
2811 sc->bge_mi_mode = BGE_MIMODE_BASE;
2812 /* Enable auto polling for BCM570[0-5]. */
2813 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2814 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2815
2816 /*
2817 * All controllers that are not 5755 or higher have a 4GB
2818 * boundary DMA bug.
2819 * Whenever an address crosses a multiple of the 4GB boundary
2820 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2821 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2822 * state machine will lock up and cause the device to hang.
2823 */
2824 if (BGE_IS_5755_PLUS(sc) == 0)
2825 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
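/*
 * Example: a descriptor ring whose DMA address range runs from
 * 0xFFFFF000 up across 0x1_0000_0000 would trip this bug.
 * bge_dma_ring_alloc() guards against it by comparing
 * BGE_ADDR_HI() of the ring start and end and, if they differ,
 * restricting the tag to 32-bit addresses and allocating again.
 */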
2826
2827 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2828 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2829 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2830 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2831 sc->bge_flags |= BGE_FLAG_5788;
2832 }
2833
2834 /*
2835 * Some controllers seem to require special firmware to use
2836 * TSO. But that firmware is not available to FreeBSD, and Linux
2837 * claims that the TSO performed by the firmware is slower than
2838 * hardware-based TSO. Moreover, the firmware-based TSO has a
2839 * known bug which can't handle TSO if the Ethernet header plus
2840 * IP/TCP header is greater than 80 bytes. A workaround for the
2841 * TSO bug exists, but it seems more expensive than not using
2842 * TSO at all. Some hardware also has the TSO bug, so limit
2843 * TSO to the controllers that are not affected by TSO issues
2844 * (e.g. 5755 or higher).
2845 */
2846 if (BGE_IS_5717_PLUS(sc)) {
2847 /* BCM5717 requires different TSO configuration. */
2848 sc->bge_flags |= BGE_FLAG_TSO3;
2849 } else if (BGE_IS_5755_PLUS(sc)) {
2850 /*
2851 * BCM5754 and BCM5787 share the same ASIC ID, so an
2852 * explicit device ID check is required.
2853 * For an unknown reason, TSO does not work on BCM5755M.
2854 */
2855 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2856 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2857 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
2858 sc->bge_flags |= BGE_FLAG_TSO;
2859 }
2860
2861 /*
2862 * Check if this is a PCI-X or PCI Express device.
2863 */
2864 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2865 /*
2866 * Found a PCI Express capabilities register, this
2867 * must be a PCI Express device.
2868 */
2869 sc->bge_flags |= BGE_FLAG_PCIE;
2870 sc->bge_expcap = reg;
2871 if (pci_get_max_read_req(dev) != 4096)
2872 pci_set_max_read_req(dev, 4096);
2873 } else {
2874 /*
2875 * Check if the device is in PCI-X Mode.
2876 * (This bit is not valid on PCI Express controllers.)
2877 */
2878 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
2879 sc->bge_pcixcap = reg;
2880 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2881 BGE_PCISTATE_PCI_BUSMODE) == 0)
2882 sc->bge_flags |= BGE_FLAG_PCIX;
2883 }
2884
2885 /*
2886 * The 40bit DMA bug applies to the 5714/5715 controllers and is
2887 * not actually a MAC controller bug but an issue with the embedded
2888 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
2889 */
2890 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2891 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2892 /*
2893 * Allocate the interrupt, using MSI if possible. These devices
2894 * support 8 MSI messages, but only the first one is used in
2895 * normal operation.
2896 */
2897 rid = 0;
2898 if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
2899 sc->bge_msicap = reg;
2900 if (bge_can_use_msi(sc)) {
2901 msicount = pci_msi_count(dev);
2902 if (msicount > 1)
2903 msicount = 1;
2904 } else
2905 msicount = 0;
2906 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2907 rid = 1;
2908 sc->bge_flags |= BGE_FLAG_MSI;
2909 }
2910 }
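/*
 * Note: the SYS_RES_IRQ rid ends up as 1 when MSI is used and 0
 * for a legacy INTx interrupt; bge_release_resources() relies on
 * the same convention when releasing the resource.
 */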
2911
2912 /*
2913 * All controllers except BCM5700 support tagged status, but
2914 * we use tagged status only in the MSI case on BCM5717.
2915 * Otherwise MSI on BCM5717 does not work.
2916 */
2917#ifndef DEVICE_POLLING
2918 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
2919 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
2920#endif
2921
2922 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2923 RF_SHAREABLE | RF_ACTIVE);
2924
2925 if (sc->bge_irq == NULL) {
2926 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2927 error = ENXIO;
2928 goto fail;
2929 }
2930
2931 device_printf(dev,
2932 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2933 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2934 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2935 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2936
2937 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2938
2939 /* Try to reset the chip. */
2940 if (bge_reset(sc)) {
2941 device_printf(sc->bge_dev, "chip reset failed\n");
2942 error = ENXIO;
2943 goto fail;
2944 }
2945
2946 sc->bge_asf_mode = 0;
2947 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2948 == BGE_MAGIC_NUMBER)) {
2949 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2950 & BGE_HWCFG_ASF) {
2951 sc->bge_asf_mode |= ASF_ENABLE;
2952 sc->bge_asf_mode |= ASF_STACKUP;
2953 if (BGE_IS_575X_PLUS(sc))
2954 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2955 }
2956 }
2957
2958 /* Try to reset the chip again the nice way. */
2959 bge_stop_fw(sc);
2960 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2961 if (bge_reset(sc)) {
2962 device_printf(sc->bge_dev, "chip reset failed\n");
2963 error = ENXIO;
2964 goto fail;
2965 }
2966
2967 bge_sig_legacy(sc, BGE_RESET_STOP);
2968 bge_sig_post_reset(sc, BGE_RESET_STOP);
2969
2970 if (bge_chipinit(sc)) {
2971 device_printf(sc->bge_dev, "chip initialization failed\n");
2972 error = ENXIO;
2973 goto fail;
2974 }
2975
2976 error = bge_get_eaddr(sc, eaddr);
2977 if (error) {
2978 device_printf(sc->bge_dev,
2979 "failed to read station address\n");
2980 error = ENXIO;
2981 goto fail;
2982 }
2983
2984 /* 5705 limits RX return ring to 512 entries. */
2985 if (BGE_IS_5717_PLUS(sc))
2986 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2987 else if (BGE_IS_5705_PLUS(sc))
2988 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2989 else
2990 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
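/*
 * Note: the 5717-class check comes first because BGE_IS_5705_PLUS()
 * also matches those chips, yet they support the full-sized RX
 * return ring again.
 */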
2991
2992 if (bge_dma_alloc(sc)) {
2993 device_printf(sc->bge_dev,
2994 "failed to allocate DMA resources\n");
2995 error = ENXIO;
2996 goto fail;
2997 }
2998
2999 bge_add_sysctls(sc);
3000
3001 /* Set default tuneable values. */
3002 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3003 sc->bge_rx_coal_ticks = 150;
3004 sc->bge_tx_coal_ticks = 150;
3005 sc->bge_rx_max_coal_bds = 10;
3006 sc->bge_tx_max_coal_bds = 10;
3007
3008 /* Initialize checksum features to use. */
3009 sc->bge_csum_features = BGE_CSUM_FEATURES;
3010 if (sc->bge_forced_udpcsum != 0)
3011 sc->bge_csum_features |= CSUM_UDP;
3012
3013 /* Set up ifnet structure */
3014 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3015 if (ifp == NULL) {
3016 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3017 error = ENXIO;
3018 goto fail;
3019 }
3020 ifp->if_softc = sc;
3021 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3022 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3023 ifp->if_ioctl = bge_ioctl;
3024 ifp->if_start = bge_start;
3025 ifp->if_init = bge_init;
3026 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3027 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3028 IFQ_SET_READY(&ifp->if_snd);
3029 ifp->if_hwassist = sc->bge_csum_features;
3030 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3031 IFCAP_VLAN_MTU;
3032 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3033 ifp->if_hwassist |= CSUM_TSO;
3034 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3035 }
3036#ifdef IFCAP_VLAN_HWCSUM
3037 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3038#endif
3039 ifp->if_capenable = ifp->if_capabilities;
3040#ifdef DEVICE_POLLING
3041 ifp->if_capabilities |= IFCAP_POLLING;
3042#endif
3043
3044 /*
3045 * 5700 B0 chips do not support checksumming correctly due
3046 * to hardware bugs.
3047 */
3048 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3049 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3050 ifp->if_capenable &= ~IFCAP_HWCSUM;
3051 ifp->if_hwassist = 0;
3052 }
3053
3054 /*
3055 * Figure out what sort of media we have by checking the
3056 * hardware config word in the first 32k of NIC internal memory,
3057 * or fall back to examining the EEPROM if necessary.
3058 * Note: on some BCM5700 cards, this value appears to be unset.
3059 * If that's the case, we have to rely on identifying the NIC
3060 * by its PCI subsystem ID, as we do below for the SysKonnect
3061 * SK-9D41.
3062 */
3063 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
3064 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
3065 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3066 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3067 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3068 sizeof(hwcfg))) {
3069 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3070 error = ENXIO;
3071 goto fail;
3072 }
3073 hwcfg = ntohl(hwcfg);
3074 }
3075
3076 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3077 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3078 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3079 if (BGE_IS_5714_FAMILY(sc))
3080 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3081 else
3082 sc->bge_flags |= BGE_FLAG_TBI;
3083 }
3084
3085 if (sc->bge_flags & BGE_FLAG_TBI) {
3086 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3087 bge_ifmedia_sts);
3088 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3089 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3090 0, NULL);
3091 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3092 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3093 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3094 } else {
3095 /*
3096 * Do transceiver setup and tell the firmware the
3097 * driver is down so we can try to get access to the
3098 * PHY for the probe if ASF is running. Retry a couple
3099 * of times if we get a conflict with the ASF firmware
3100 * accessing the PHY.
3101 */
3102 trys = 0;
3103 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3104again:
3105 bge_asf_driver_up(sc);
3106
3105 error = (mii_attach(dev, &sc->bge_miibus, ifp,
3107 error = mii_attach(dev, &sc->bge_miibus, ifp,
3106 bge_ifmedia_upd, bge_ifmedia_sts, BMSR_DEFCAPMASK,
3108 bge_ifmedia_upd, bge_ifmedia_sts, BMSR_DEFCAPMASK,
3107 phy_addr, MII_OFFSET_ANY, 0));
3109 phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
3108 if (error != 0) {
3109 if (trys++ < 4) {
3110 device_printf(sc->bge_dev, "Try again\n");
3111 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3112 BMCR_RESET);
3113 goto again;
3114 }
3115 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3116 goto fail;
3117 }
3118
3119 /*
3120 * Now tell the firmware we are going up after probing the PHY.
3121 */
3122 if (sc->bge_asf_mode & ASF_STACKUP)
3123 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3124 }
3125
3126 /*
3127 * When using the BCM5701 in PCI-X mode, data corruption has
3128 * been observed in the first few bytes of some received packets.
3129 * Aligning the packet buffer in memory eliminates the corruption.
3130 * Unfortunately, this misaligns the packet payloads. On platforms
3131 * which do not support unaligned accesses, we will realign the
3132 * payloads by copying the received packets.
3133 */
3134 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3135 sc->bge_flags & BGE_FLAG_PCIX)
3136 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3137
3138 /*
3139 * Call MI attach routine.
3140 */
3141 ether_ifattach(ifp, eaddr);
3142 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3143
3144 /* Tell upper layer we support long frames. */
3145 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3146
3147 /*
3148 * Hookup IRQ last.
3149 */
3150#if __FreeBSD_version > 700030
3151 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3152 /* Take advantage of single-shot MSI. */
3153 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3154 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3155 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3156 taskqueue_thread_enqueue, &sc->bge_tq);
3157 if (sc->bge_tq == NULL) {
3158 device_printf(dev, "could not create taskqueue.\n");
3159 ether_ifdetach(ifp);
3160 error = ENXIO;
3161 goto fail;
3162 }
3163 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3164 device_get_nameunit(sc->bge_dev));
3165 error = bus_setup_intr(dev, sc->bge_irq,
3166 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3167 &sc->bge_intrhand);
3168 if (error)
3169 ether_ifdetach(ifp);
3170 } else
3171 error = bus_setup_intr(dev, sc->bge_irq,
3172 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3173 &sc->bge_intrhand);
3174#else
3175 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
3176 bge_intr, sc, &sc->bge_intrhand);
3177#endif
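/*
 * Note: in the one-shot MSI case bge_msi_intr is registered as an
 * interrupt filter (the handler argument is NULL) and the real work
 * is deferred to bge_intr_task on the driver's taskqueue; the other
 * paths register bge_intr as an ordinary interrupt handler.
 */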
3178
3179 if (error) {
3180 bge_detach(dev);
3181 device_printf(sc->bge_dev, "couldn't set up irq\n");
3182 }
3183
3184 return (0);
3185
3186fail:
3187 bge_release_resources(sc);
3188
3189 return (error);
3190}
3191
3192static int
3193bge_detach(device_t dev)
3194{
3195 struct bge_softc *sc;
3196 struct ifnet *ifp;
3197
3198 sc = device_get_softc(dev);
3199 ifp = sc->bge_ifp;
3200
3201#ifdef DEVICE_POLLING
3202 if (ifp->if_capenable & IFCAP_POLLING)
3203 ether_poll_deregister(ifp);
3204#endif
3205
3206 BGE_LOCK(sc);
3207 bge_stop(sc);
3208 bge_reset(sc);
3209 BGE_UNLOCK(sc);
3210
3211 callout_drain(&sc->bge_stat_ch);
3212
3213 if (sc->bge_tq)
3214 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3215 ether_ifdetach(ifp);
3216
3217 if (sc->bge_flags & BGE_FLAG_TBI) {
3218 ifmedia_removeall(&sc->bge_ifmedia);
3219 } else {
3220 bus_generic_detach(dev);
3221 device_delete_child(dev, sc->bge_miibus);
3222 }
3223
3224 bge_release_resources(sc);
3225
3226 return (0);
3227}
3228
3229static void
3230bge_release_resources(struct bge_softc *sc)
3231{
3232 device_t dev;
3233
3234 dev = sc->bge_dev;
3235
3236 if (sc->bge_tq != NULL)
3237 taskqueue_free(sc->bge_tq);
3238
3239 if (sc->bge_intrhand != NULL)
3240 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3241
3242 if (sc->bge_irq != NULL)
3243 bus_release_resource(dev, SYS_RES_IRQ,
3244 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3245
3246 if (sc->bge_flags & BGE_FLAG_MSI)
3247 pci_release_msi(dev);
3248
3249 if (sc->bge_res != NULL)
3250 bus_release_resource(dev, SYS_RES_MEMORY,
3251 PCIR_BAR(0), sc->bge_res);
3252
3253 if (sc->bge_ifp != NULL)
3254 if_free(sc->bge_ifp);
3255
3256 bge_dma_free(sc);
3257
3258 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3259 BGE_LOCK_DESTROY(sc);
3260}
3261
3262static int
3263bge_reset(struct bge_softc *sc)
3264{
3265 device_t dev;
3266 uint32_t cachesize, command, pcistate, reset, val;
3267 void (*write_op)(struct bge_softc *, int, int);
3268 uint16_t devctl;
3269 int i;
3270
3271 dev = sc->bge_dev;
3272
3273 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3274 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3275 if (sc->bge_flags & BGE_FLAG_PCIE)
3276 write_op = bge_writemem_direct;
3277 else
3278 write_op = bge_writemem_ind;
3279 } else
3280 write_op = bge_writereg_ind;
3281
3282 /* Save some important PCI state. */
3283 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3284 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3285 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3286
3287 pci_write_config(dev, BGE_PCI_MISC_CTL,
3288 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3289 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3290
3291 /* Disable fastboot on controllers that support it. */
3292 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3293 BGE_IS_5755_PLUS(sc)) {
3294 if (bootverbose)
3295 device_printf(dev, "Disabling fastboot\n");
3296 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3297 }
3298
3299 /*
3300 * Write the magic number to SRAM at offset 0xB50.
3301 * When firmware finishes its initialization it will
3302 * write ~BGE_MAGIC_NUMBER to the same location.
3303 */
3304 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3305
3306 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3307
3308 /* XXX: Broadcom Linux driver. */
3309 if (sc->bge_flags & BGE_FLAG_PCIE) {
3310 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3311 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3312 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3313 /* Prevent PCIE link training during global reset */
3314 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3315 reset |= 1 << 29;
3316 }
3317 }
3318
3319 /*
3320 * Set GPHY Power Down Override to leave GPHY
3321 * powered up in D0 uninitialized.
3322 */
3323 if (BGE_IS_5705_PLUS(sc))
3324 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3325
3326 /* Issue global reset */
3327 write_op(sc, BGE_MISC_CFG, reset);
3328
3329 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3330 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3331 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3332 val | BGE_VCPU_STATUS_DRV_RESET);
3333 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3334 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3335 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3336 }
3337
3338 DELAY(1000);
3339
3340 /* XXX: Broadcom Linux driver. */
3341 if (sc->bge_flags & BGE_FLAG_PCIE) {
3342 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3343 DELAY(500000); /* wait for link training to complete */
3344 val = pci_read_config(dev, 0xC4, 4);
3345 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3346 }
3347 devctl = pci_read_config(dev,
3348 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3349 /* Clear enable no snoop and disable relaxed ordering. */
3350 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3351 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3352 /* Set PCIE max payload size to 128. */
3353 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3354 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3355 devctl, 2);
3356 /* Clear error status. */
3357 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3358 PCIM_EXP_STA_CORRECTABLE_ERROR |
3359 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3360 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3361 }
3362
3363 /* Reset some of the PCI state that got zapped by reset. */
3364 pci_write_config(dev, BGE_PCI_MISC_CTL,
3365 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3366 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3367 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3368 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3369 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3370 /*
3371 * Disable PCI-X relaxed ordering to ensure the status block
3372 * update comes before packet buffer DMA. Otherwise the driver
3373 * may read a stale status block.
3374 */
3375 if (sc->bge_flags & BGE_FLAG_PCIX) {
3376 devctl = pci_read_config(dev,
3377 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3378 devctl &= ~PCIXM_COMMAND_ERO;
3379 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3380 devctl &= ~PCIXM_COMMAND_MAX_READ;
3381 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3382 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3383 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3384 PCIXM_COMMAND_MAX_READ);
3385 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3386 }
3387 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3388 devctl, 2);
3389 }
3390 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3391 if (BGE_IS_5714_FAMILY(sc)) {
3392 /* This chip disables MSI on reset. */
3393 if (sc->bge_flags & BGE_FLAG_MSI) {
3394 val = pci_read_config(dev,
3395 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3396 pci_write_config(dev,
3397 sc->bge_msicap + PCIR_MSI_CTRL,
3398 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3399 val = CSR_READ_4(sc, BGE_MSI_MODE);
3400 CSR_WRITE_4(sc, BGE_MSI_MODE,
3401 val | BGE_MSIMODE_ENABLE);
3402 }
3403 val = CSR_READ_4(sc, BGE_MARB_MODE);
3404 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3405 } else
3406 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3407
3408 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3409 for (i = 0; i < BGE_TIMEOUT; i++) {
3410 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3411 if (val & BGE_VCPU_STATUS_INIT_DONE)
3412 break;
3413 DELAY(100);
3414 }
3415 if (i == BGE_TIMEOUT) {
3416 device_printf(dev, "reset timed out\n");
3417 return (1);
3418 }
3419 } else {
3420 /*
3421 * Poll until we see the 1's complement of the magic number.
3422 * This indicates that the firmware initialization is complete.
3423 * We expect this to fail if no chip containing the Ethernet
3424 * address is fitted, though.
3425 */
3426 for (i = 0; i < BGE_TIMEOUT; i++) {
3427 DELAY(10);
3428 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3429 if (val == ~BGE_MAGIC_NUMBER)
3430 break;
3431 }
3432
3433 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3434 device_printf(dev,
3435 "firmware handshake timed out, found 0x%08x\n",
3436 val);
3437 }
3438
3439 /*
3440 * XXX Wait for the value of the PCISTATE register to
3441 * return to its original pre-reset state. This is a
3442 * fairly good indicator of reset completion. If we don't
3443 * wait for the reset to fully complete, trying to read
3444 * from the device's non-PCI registers may yield garbage
3445 * results.
3446 */
3447 for (i = 0; i < BGE_TIMEOUT; i++) {
3448 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3449 break;
3450 DELAY(10);
3451 }
3452
3453 /* Fix up byte swapping. */
3454 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3455 BGE_MODECTL_BYTESWAP_DATA);
3456
3457 /* Tell the ASF firmware we are up */
3458 if (sc->bge_asf_mode & ASF_STACKUP)
3459 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3460
3461 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3462
3463 /*
3464 * The 5704 in TBI mode apparently needs some special
3465 * adjustment to ensure the SERDES drive level is set
3466 * to 1.2V.
3467 */
3468 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3469 sc->bge_flags & BGE_FLAG_TBI) {
3470 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3471 val = (val & ~0xFFF) | 0x880;
3472 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3473 }
3474
3475 /* XXX: Broadcom Linux driver. */
3476 if (sc->bge_flags & BGE_FLAG_PCIE &&
3477 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
3478 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3479 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3480 /* Enable Data FIFO protection. */
3481 val = CSR_READ_4(sc, 0x7C00);
3482 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3483 }
3484 DELAY(10000);
3485
3486 return (0);
3487}
3488
3489static __inline void
3490bge_rxreuse_std(struct bge_softc *sc, int i)
3491{
3492 struct bge_rx_bd *r;
3493
3494 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3495 r->bge_flags = BGE_RXBDFLAG_END;
3496 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3497 r->bge_idx = i;
3498 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3499}
3500
3501static __inline void
3502bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3503{
3504 struct bge_extrx_bd *r;
3505
3506 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3507 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3508 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3509 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3510 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3511 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3512 r->bge_idx = i;
3513 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3514}
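/*
 * Note: these two helpers requeue the existing mbuf's descriptor,
 * using the segment lengths saved when it was last posted, for the
 * cases where a replacement buffer cannot be allocated or the frame
 * had an error; the hardware then reuses the old buffer and the
 * frame is effectively dropped.
 */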
3515
3516/*
3517 * Frame reception handling. This is called if there's a frame
3518 * on the receive return list.
3519 *
3520 * Note: we have to be able to handle two possibilities here:
3521 * 1) the frame is from the jumbo receive ring
3522 * 2) the frame is from the standard receive ring
3523 */
3524
3525static int
3526bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3527{
3528 struct ifnet *ifp;
3529 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3530 uint16_t rx_cons;
3531
3532 rx_cons = sc->bge_rx_saved_considx;
3533
3534 /* Nothing to do. */
3535 if (rx_cons == rx_prod)
3536 return (rx_npkts);
3537
3538 ifp = sc->bge_ifp;
3539
3540 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3541 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3542 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3543 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3544 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3545 (MCLBYTES - ETHER_ALIGN))
3546 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3547 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3548
3549 while (rx_cons != rx_prod) {
3550 struct bge_rx_bd *cur_rx;
3551 uint32_t rxidx;
3552 struct mbuf *m = NULL;
3553 uint16_t vlan_tag = 0;
3554 int have_tag = 0;
3555
3556#ifdef DEVICE_POLLING
3557 if (ifp->if_capenable & IFCAP_POLLING) {
3558 if (sc->rxcycles <= 0)
3559 break;
3560 sc->rxcycles--;
3561 }
3562#endif
3563
3564 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3565
3566 rxidx = cur_rx->bge_idx;
3567 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3568
3569 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3570 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3571 have_tag = 1;
3572 vlan_tag = cur_rx->bge_vlan_tag;
3573 }
3574
3575 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3576 jumbocnt++;
3577 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3578 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3579 bge_rxreuse_jumbo(sc, rxidx);
3580 continue;
3581 }
3582 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3583 bge_rxreuse_jumbo(sc, rxidx);
3584 ifp->if_iqdrops++;
3585 continue;
3586 }
3587 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3588 } else {
3589 stdcnt++;
3590 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3591 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3592 bge_rxreuse_std(sc, rxidx);
3593 continue;
3594 }
3595 if (bge_newbuf_std(sc, rxidx) != 0) {
3596 bge_rxreuse_std(sc, rxidx);
3597 ifp->if_iqdrops++;
3598 continue;
3599 }
3600 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3601 }
3602
3603 ifp->if_ipackets++;
3604#ifndef __NO_STRICT_ALIGNMENT
3605 /*
3606 * For architectures with strict alignment we must make sure
3607 * the payload is aligned.
3608 */
3609 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3610 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3611 cur_rx->bge_len);
3612 m->m_data += ETHER_ALIGN;
3613 }
3614#endif
3615 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3616 m->m_pkthdr.rcvif = ifp;
3617
3618 if (ifp->if_capenable & IFCAP_RXCSUM)
3619 bge_rxcsum(sc, cur_rx, m);
3620
3621 /*
3622 * If we received a packet with a vlan tag,
3623 * attach that information to the packet.
3624 */
3625 if (have_tag) {
3626#if __FreeBSD_version > 700022
3627 m->m_pkthdr.ether_vtag = vlan_tag;
3628 m->m_flags |= M_VLANTAG;
3629#else
3630 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3631 if (m == NULL)
3632 continue;
3633#endif
3634 }
3635
3636 if (holdlck != 0) {
3637 BGE_UNLOCK(sc);
3638 (*ifp->if_input)(ifp, m);
3639 BGE_LOCK(sc);
3640 } else
3641 (*ifp->if_input)(ifp, m);
3642 rx_npkts++;
3643
3644 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3645 return (rx_npkts);
3646 }
3647
3648 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3649 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3650 if (stdcnt > 0)
3651 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3652 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3653
3654 if (jumbocnt > 0)
3655 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3656 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3657
3658 sc->bge_rx_saved_considx = rx_cons;
3659 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3660 if (stdcnt)
3661 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3662 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3663 if (jumbocnt)
3664 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3665 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3666#ifdef notyet
3667 /*
3668 * This register wraps very quickly under heavy packet drops.
3669 * If you need correct statistics, you can enable this check.
3670 */
3671 if (BGE_IS_5705_PLUS(sc))
3672 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3673#endif
3674 return (rx_npkts);
3675}
3676
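/*
 * Set the mbuf's receive checksum-offload flags from the RX buffer
 * descriptor. 5717-and-newer parts flag bad IP checksums via
 * bge_error_flag; older parts return the computed IP checksum value
 * for the driver to verify.
 */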
3677static void
3678bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3679{
3680
3681 if (BGE_IS_5717_PLUS(sc)) {
3682 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3683 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3684 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3685 if ((cur_rx->bge_error_flag &
3686 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3687 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3688 }
3689 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3690 m->m_pkthdr.csum_data =
3691 cur_rx->bge_tcp_udp_csum;
3692 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3693 CSUM_PSEUDO_HDR;
3694 }
3695 }
3696 } else {
3697 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3698 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3699 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3700 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3701 }
3702 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3703 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3704 m->m_pkthdr.csum_data =
3705 cur_rx->bge_tcp_udp_csum;
3706 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3707 CSUM_PSEUDO_HDR;
3708 }
3709 }
3710}
3711
3712static void
3713bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3714{
3715 struct bge_tx_bd *cur_tx;
3716 struct ifnet *ifp;
3717
3718 BGE_LOCK_ASSERT(sc);
3719
3720 /* Nothing to do. */
3721 if (sc->bge_tx_saved_considx == tx_cons)
3722 return;
3723
3724 ifp = sc->bge_ifp;
3725
3726 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3727 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3728 /*
3729 * Go through our tx ring and free mbufs for those
3730 * frames that have been sent.
3731 */
3732 while (sc->bge_tx_saved_considx != tx_cons) {
3733 uint32_t idx;
3734
3735 idx = sc->bge_tx_saved_considx;
3736 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3737 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3738 ifp->if_opackets++;
3739 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3740 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3741 sc->bge_cdata.bge_tx_dmamap[idx],
3742 BUS_DMASYNC_POSTWRITE);
3743 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3744 sc->bge_cdata.bge_tx_dmamap[idx]);
3745 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3746 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3747 }
3748 sc->bge_txcnt--;
3749 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3750 }
3751
3752 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3753 if (sc->bge_txcnt == 0)
3754 sc->bge_timer = 0;
3755}
3756
3757#ifdef DEVICE_POLLING
3758static int
3759bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3760{
3761 struct bge_softc *sc = ifp->if_softc;
3762 uint16_t rx_prod, tx_cons;
3763 uint32_t statusword;
3764 int rx_npkts = 0;
3765
3766 BGE_LOCK(sc);
3767 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3768 BGE_UNLOCK(sc);
3769 return (rx_npkts);
3770 }
3771
3772 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3773 sc->bge_cdata.bge_status_map,
3774 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3775 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3776 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3777
3778 statusword = sc->bge_ldata.bge_status_block->bge_status;
3779 sc->bge_ldata.bge_status_block->bge_status = 0;
3780
3781 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3782 sc->bge_cdata.bge_status_map,
3783 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3784
3785 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3786 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3787 sc->bge_link_evt++;
3788
3789 if (cmd == POLL_AND_CHECK_STATUS)
3790 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3791 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3792 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3793 bge_link_upd(sc);
3794
3795 sc->rxcycles = count;
3796 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3797 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3798 BGE_UNLOCK(sc);
3799 return (rx_npkts);
3800 }
3801 bge_txeof(sc, tx_cons);
3802 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3803 bge_start_locked(ifp);
3804
3805 BGE_UNLOCK(sc);
3806 return (rx_npkts);
3807}
3808#endif /* DEVICE_POLLING */
3809
3810static int
3811bge_msi_intr(void *arg)
3812{
3813 struct bge_softc *sc;
3814
3815 sc = (struct bge_softc *)arg;
3816 /*
3817 * This interrupt is not shared and the controller has already
3818 * disabled further interrupts.
3819 */
3820 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3821 return (FILTER_HANDLED);
3822}
3823
3824static void
3825bge_intr_task(void *arg, int pending)
3826{
3827 struct bge_softc *sc;
3828 struct ifnet *ifp;
3829 uint32_t status, status_tag;
3830 uint16_t rx_prod, tx_cons;
3831
3832 sc = (struct bge_softc *)arg;
3833 ifp = sc->bge_ifp;
3834
3835 BGE_LOCK(sc);
3836 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3837 BGE_UNLOCK(sc);
3838 return;
3839 }
3840
3841 /* Get updated status block. */
3842 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3843 sc->bge_cdata.bge_status_map,
3844 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3845
3846 /* Save producer/consumer indexes. */
3847 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3848 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3849 status = sc->bge_ldata.bge_status_block->bge_status;
3850 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
3851 sc->bge_ldata.bge_status_block->bge_status = 0;
3852 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3853 sc->bge_cdata.bge_status_map,
3854 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3855 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
3856 status_tag = 0;
3857
3858 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
3859 bge_link_upd(sc);
3860
3861 /* Let controller work. */
3862 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
3863
3864 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3865 sc->bge_rx_saved_considx != rx_prod) {
3866 /* Check RX return ring producer/consumer. */
3867 BGE_UNLOCK(sc);
3868 bge_rxeof(sc, rx_prod, 0);
3869 BGE_LOCK(sc);
3870 }
3871 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3872 /* Check TX ring producer/consumer. */
3873 bge_txeof(sc, tx_cons);
3874 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3875 bge_start_locked(ifp);
3876 }
3877 BGE_UNLOCK(sc);
3878}
3879
3880static void
3881bge_intr(void *xsc)
3882{
3883 struct bge_softc *sc;
3884 struct ifnet *ifp;
3885 uint32_t statusword;
3886 uint16_t rx_prod, tx_cons;
3887
3888 sc = xsc;
3889
3890 BGE_LOCK(sc);
3891
3892 ifp = sc->bge_ifp;
3893
3894#ifdef DEVICE_POLLING
3895 if (ifp->if_capenable & IFCAP_POLLING) {
3896 BGE_UNLOCK(sc);
3897 return;
3898 }
3899#endif
3900
3901 /*
3902 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3903 * disable interrupts by writing nonzero like we used to, since with
3904 * our current organization this just gives complications and
3905 * pessimizations for re-enabling interrupts. We used to have races
3906 * instead of the necessary complications. Disabling interrupts
3907 * would just reduce the chance of a status update while we are
3908 * running (by switching to the interrupt-mode coalescence
3909 * parameters), but this chance is already very low so it is more
3910 * efficient to get another interrupt than prevent it.
3911 *
3912 * We do the ack first to ensure another interrupt if there is a
3913 * status update after the ack. We don't check for the status
3914 * changing later because it is more efficient to get another
3915 * interrupt than prevent it, not quite as above (not checking is
3916 * a smaller optimization than not toggling the interrupt enable,
3917 * since checking doesn't involve PCI accesses and toggling requires
3918 * the status check). So toggling would probably be a pessimization
3919 * even with MSI. It would only be needed for using a task queue.
3920 */
3921 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3922
3923 /*
3924 * Do the mandatory PCI flush as well as get the link status.
3925 */
3926 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3927
3928 /* Make sure the descriptor ring indexes are coherent. */
3929 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3930 sc->bge_cdata.bge_status_map,
3931 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3932 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3933 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3934 sc->bge_ldata.bge_status_block->bge_status = 0;
3935 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3936 sc->bge_cdata.bge_status_map,
3937 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3938
3939 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3940 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3941 statusword || sc->bge_link_evt)
3942 bge_link_upd(sc);
3943
3944 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3945 /* Check RX return ring producer/consumer. */
3946 bge_rxeof(sc, rx_prod, 1);
3947 }
3948
3949 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3950 /* Check TX ring producer/consumer. */
3951 bge_txeof(sc, tx_cons);
3952 }
3953
3954 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3955 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3956 bge_start_locked(ifp);
3957
3958 BGE_UNLOCK(sc);
3959}
3960
3961static void
3962bge_asf_driver_up(struct bge_softc *sc)
3963{
3964 if (sc->bge_asf_mode & ASF_STACKUP) {
3965 /* Send ASF heartbeat approx. every 2s */
3966 if (sc->bge_asf_count)
3967 sc->bge_asf_count --;
3968 else {
3969 sc->bge_asf_count = 2;
3970 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3971 BGE_FW_DRV_ALIVE);
3972 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3973 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3974 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3975 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3976 }
3977 }
3978}
3979
3980static void
3981bge_tick(void *xsc)
3982{
3983 struct bge_softc *sc = xsc;
3984 struct mii_data *mii = NULL;
3985
3986 BGE_LOCK_ASSERT(sc);
3987
3988 /* Synchronize with possible callout reset/stop. */
3989 if (callout_pending(&sc->bge_stat_ch) ||
3990 !callout_active(&sc->bge_stat_ch))
3991 return;
3992
3993 if (BGE_IS_5705_PLUS(sc))
3994 bge_stats_update_regs(sc);
3995 else
3996 bge_stats_update(sc);
3997
3998 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3999 mii = device_get_softc(sc->bge_miibus);
4000 /*
4001 * Do not touch the PHY if we have link up. This could break
4002 * IPMI/ASF mode or produce extra input errors
4003 * (extra errors were reported for bcm5701 & bcm5704).
4004 */
4005 if (!sc->bge_link)
4006 mii_tick(mii);
4007 } else {
4008 /*
4009 * Since auto-polling can't be used in TBI mode, we have to poll
4010 * link status manually. Here we register a pending link event
4011 * and trigger an interrupt.
4012 */
4013#ifdef DEVICE_POLLING
4014 /* In polling mode we poll link state in bge_poll(). */
4015 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4016#endif
4017 {
4018 sc->bge_link_evt++;
4019 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4020 sc->bge_flags & BGE_FLAG_5788)
4021 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4022 else
4023 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4024 }
4025 }
4026
4027 bge_asf_driver_up(sc);
4028 bge_watchdog(sc);
4029
4030 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4031}
4032
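/*
 * Update interface statistics by accumulating the MAC statistics
 * registers. This path is used for 5705-and-newer controllers (see
 * bge_tick()); older chips use the NIC-memory statistics block read
 * in bge_stats_update().
 */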
4033static void
4034bge_stats_update_regs(struct bge_softc *sc)
4035{
4036 struct ifnet *ifp;
4037 struct bge_mac_stats *stats;
4038
4039 ifp = sc->bge_ifp;
4040 stats = &sc->bge_mac_stats;
4041
4042 stats->ifHCOutOctets +=
4043 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4044 stats->etherStatsCollisions +=
4045 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4046 stats->outXonSent +=
4047 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4048 stats->outXoffSent +=
4049 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4050 stats->dot3StatsInternalMacTransmitErrors +=
4051 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4052 stats->dot3StatsSingleCollisionFrames +=
4053 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4054 stats->dot3StatsMultipleCollisionFrames +=
4055 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4056 stats->dot3StatsDeferredTransmissions +=
4057 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4058 stats->dot3StatsExcessiveCollisions +=
4059 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4060 stats->dot3StatsLateCollisions +=
4061 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4062 stats->ifHCOutUcastPkts +=
4063 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4064 stats->ifHCOutMulticastPkts +=
4065 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4066 stats->ifHCOutBroadcastPkts +=
4067 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4068
4069 stats->ifHCInOctets +=
4070 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4071 stats->etherStatsFragments +=
4072 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4073 stats->ifHCInUcastPkts +=
4074 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4075 stats->ifHCInMulticastPkts +=
4076 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4077 stats->ifHCInBroadcastPkts +=
4078 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4079 stats->dot3StatsFCSErrors +=
4080 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4081 stats->dot3StatsAlignmentErrors +=
4082 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4083 stats->xonPauseFramesReceived +=
4084 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4085 stats->xoffPauseFramesReceived +=
4086 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4087 stats->macControlFramesReceived +=
4088 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4089 stats->xoffStateEntered +=
4090 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4091 stats->dot3StatsFramesTooLong +=
4092 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4093 stats->etherStatsJabbers +=
4094 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4095 stats->etherStatsUndersizePkts +=
4096 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4097
4098 stats->FramesDroppedDueToFilters +=
4099 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4100 stats->DmaWriteQueueFull +=
4101 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4102 stats->DmaWriteHighPriQueueFull +=
4103 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4104 stats->NoMoreRxBDs +=
4105 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4106 stats->InputDiscards +=
4107 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4108 stats->InputErrors +=
4109 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4110 stats->RecvThresholdHit +=
4111 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4112
4113 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4114 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4115 stats->InputErrors);
4116}
4117
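/*
 * Clear the MAC statistics by reading each counter register and
 * discarding the value (the counters are presumed clear-on-read here).
 */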
4118static void
4119bge_stats_clear_regs(struct bge_softc *sc)
4120{
4121
4122 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4123 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4124 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4125 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4126 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4127 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4128 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4129 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4130 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4131 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4132 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4133 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4134 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4135
4136 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4137 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4138 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4139 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4140 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4141 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4142 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4143 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4144 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4145 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4146 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4147 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4148 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4149 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4150
4151 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4152 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4153 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4154 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4155 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4156 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4157 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4158}
4159
4160static void
4161bge_stats_update(struct bge_softc *sc)
4162{
4163 struct ifnet *ifp;
4164 bus_size_t stats;
4165 uint32_t cnt; /* current register value */
4166
4167 ifp = sc->bge_ifp;
4168
4169 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4170
4171#define READ_STAT(sc, stats, stat) \
4172 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4173
4174 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4175 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4176 sc->bge_tx_collisions = cnt;
4177
4178 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4179 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4180 sc->bge_rx_discards = cnt;
4181
4182 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4183 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4184 sc->bge_tx_discards = cnt;
4185
4186#undef READ_STAT
4187}
4188
4189/*
4190 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4191 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4192 * but when such padded frames employ the bge IP/TCP checksum offload,
4193 * the hardware checksum assist gives incorrect results (possibly
4194 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4195 * If we pad such runts with zeros, the onboard checksum comes out correct.
4196 */
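/*
 * For example, a 50-byte frame is extended with 60 - 50 = 10 zero bytes
 * (ETHER_MIN_NOPAD being ETHER_MIN_LEN - ETHER_CRC_LEN, i.e. 60) before
 * the checksum engine sees it.
 */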
4197static __inline int
4198bge_cksum_pad(struct mbuf *m)
4199{
4200 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4201 struct mbuf *last;
4202
4203 /* If there's only the packet-header and we can pad there, use it. */
4204 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4205 M_TRAILINGSPACE(m) >= padlen) {
4206 last = m;
4207 } else {
4208 /*
4209 * Walk packet chain to find last mbuf. We will either
4210 * pad there, or append a new mbuf and pad it.
4211 */
4212 for (last = m; last->m_next != NULL; last = last->m_next);
4213 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4214 /* Allocate new empty mbuf, pad it. Compact later. */
4215 struct mbuf *n;
4216
4217 MGET(n, M_DONTWAIT, MT_DATA);
4218 if (n == NULL)
4219 return (ENOBUFS);
4220 n->m_len = 0;
4221 last->m_next = n;
4222 last = n;
4223 }
4224 }
4225
4226 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4227 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4228 last->m_len += padlen;
4229 m->m_pkthdr.len += padlen;
4230
4231 return (0);
4232}
4233
4234static struct mbuf *
4235bge_check_short_dma(struct mbuf *m)
4236{
4237 struct mbuf *n;
4238 int found;
4239
4240 /*
4241 * If the device receives two back-to-back send BDs with less than
4242 * or equal to 8 total bytes then the device may hang. The two
4243 * back-to-back send BDs must be in the same frame for this failure
4244 * to occur. Scan the mbuf chain and see whether two such back-to-back
4245 * send BDs are there. If this is the case, allocate a new mbuf
4246 * and copy the frame to work around the silicon bug.
4247 */
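	/*
	 * For example, a chain with segment lengths 4, 6 and 1500 contains
	 * two consecutive sub-8-byte segments and is defragmented below,
	 * while lengths 4, 1500 and 6 would be left alone.
	 */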
4248 for (n = m, found = 0; n != NULL; n = n->m_next) {
4249 if (n->m_len < 8) {
4250 found++;
4251 if (found > 1)
4252 break;
4253 continue;
4254 }
4255 found = 0;
4256 }
4257
4258 if (found > 1) {
4259 n = m_defrag(m, M_DONTWAIT);
4260 if (n == NULL)
4261 m_freem(m);
4262 } else
4263 n = m;
4264 return (n);
4265}
4266
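/*
 * Prepare an outbound TSO frame: ensure the IP and TCP headers are
 * contiguous and writable, rewrite the IP length and clear the IP and
 * TCP pseudo checksums as the controller expects, and encode the MSS
 * and header length into the send BD fields.
 */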
4267static struct mbuf *
4268bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4269 uint16_t *flags)
4270{
4271 struct ip *ip;
4272 struct tcphdr *tcp;
4273 struct mbuf *n;
4274 uint16_t hlen;
4275 uint32_t poff;
4276
4277 if (M_WRITABLE(m) == 0) {
4278 /* Get a writable copy. */
4279 n = m_dup(m, M_DONTWAIT);
4280 m_freem(m);
4281 if (n == NULL)
4282 return (NULL);
4283 m = n;
4284 }
4285 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4286 if (m == NULL)
4287 return (NULL);
4288 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4289 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4290 m = m_pullup(m, poff + sizeof(struct tcphdr));
4291 if (m == NULL)
4292 return (NULL);
4293 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4294 m = m_pullup(m, poff + (tcp->th_off << 2));
4295 if (m == NULL)
4296 return (NULL);
4297 /*
4298 * The controller does not seem to modify the IP length and TCP pseudo
4299 * checksum. These checksums, computed by the upper stack, should be 0.
4300 */
4301 *mss = m->m_pkthdr.tso_segsz;
4302 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4303 ip->ip_sum = 0;
4304 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4305 /* Clear pseudo checksum computed by TCP stack. */
4306 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4307 tcp->th_sum = 0;
4308 /*
4309 * Broadcom controllers use different descriptor formats for
4310 * TSO depending on the ASIC revision. Due to the TSO-capable firmware
4311 * license issue and the lower performance of firmware-based TSO,
4312 * we only support hardware-based TSO.
4313 */
4314 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4315 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4316 if (sc->bge_flags & BGE_FLAG_TSO3) {
4317 /*
4318 * For BCM5717 and newer controllers, hardware based TSO
4319 * uses the 14 lower bits of the bge_mss field to store the
4320 * MSS and the upper 2 bits to store the lowest 2 bits of
4321 * the IP/TCP header length. The upper 6 bits of the header
4322 * length are stored in the bge_flags[14:10,4] field. Jumbo
4323 * frames are supported.
4324 */
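		/*
		 * For example, a 20-byte IP header plus a 32-byte TCP header
		 * gives hlen = 52 / 4 = 13 (binary 01101): bits 1:0 of hlen
		 * (01) go into bge_mss[15:14], bit 2 (1) into bge_flags[4],
		 * and bits 7:3 (00001) into bge_flags[14:10].
		 */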
4325 *mss |= ((hlen & 0x3) << 14);
4326 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
4327 } else {
4328 /*
4329 * For BCM5755 and newer controllers, hardware based TSO uses
4330 * the lower 11 bits to store the MSS and the upper 5 bits to
4331 * store the IP/TCP header length. Jumbo frames are not
4332 * supported.
4333 */
4334 *mss |= (hlen << 11);
4335 }
4336 return (m);
4337}
4338
4339/*
4340 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4341 * pointers to descriptors.
4342 */
4343static int
4344bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4345{
4346 bus_dma_segment_t segs[BGE_NSEG_NEW];
4347 bus_dmamap_t map;
4348 struct bge_tx_bd *d;
4349 struct mbuf *m = *m_head;
4350 uint32_t idx = *txidx;
4351 uint16_t csum_flags, mss, vlan_tag;
4352 int nsegs, i, error;
4353
4354 csum_flags = 0;
4355 mss = 0;
4356 vlan_tag = 0;
4357 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4358 m->m_next != NULL) {
4359 *m_head = bge_check_short_dma(m);
4360 if (*m_head == NULL)
4361 return (ENOBUFS);
4362 m = *m_head;
4363 }
4364 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4365 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4366 if (*m_head == NULL)
4367 return (ENOBUFS);
4368 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4369 BGE_TXBDFLAG_CPU_POST_DMA;
4370 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4371 if (m->m_pkthdr.csum_flags & CSUM_IP)
4372 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4373 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4374 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4375 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4376 (error = bge_cksum_pad(m)) != 0) {
4377 m_freem(m);
4378 *m_head = NULL;
4379 return (error);
4380 }
4381 }
4382 if (m->m_flags & M_LASTFRAG)
4383 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4384 else if (m->m_flags & M_FRAG)
4385 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4386 }
4387
4388 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4389 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4390 m->m_pkthdr.len > ETHER_MAX_LEN)
4391 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4392 if (sc->bge_forced_collapse > 0 &&
4393 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4394 /*
4395 * Forcibly collapse mbuf chains to overcome the hardware
4396 * limitation of supporting only a single outstanding
4397 * DMA read operation.
4398 */
4399 if (sc->bge_forced_collapse == 1)
4400 m = m_defrag(m, M_DONTWAIT);
4401 else
4402 m = m_collapse(m, M_DONTWAIT,
4403 sc->bge_forced_collapse);
4404 if (m == NULL)
4405 m = *m_head;
4406 *m_head = m;
4407 }
4408 }
4409
4410 map = sc->bge_cdata.bge_tx_dmamap[idx];
4411 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4412 &nsegs, BUS_DMA_NOWAIT);
4413 if (error == EFBIG) {
4414 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4415 if (m == NULL) {
4416 m_freem(*m_head);
4417 *m_head = NULL;
4418 return (ENOBUFS);
4419 }
4420 *m_head = m;
4421 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4422 m, segs, &nsegs, BUS_DMA_NOWAIT);
4423 if (error) {
4424 m_freem(m);
4425 *m_head = NULL;
4426 return (error);
4427 }
4428 } else if (error != 0)
4429 return (error);
4430
4431 /* Check if we have enough free send BDs. */
4432 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4433 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4434 return (ENOBUFS);
4435 }
4436
4437 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4438
4439#if __FreeBSD_version > 700022
4440 if (m->m_flags & M_VLANTAG) {
4441 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4442 vlan_tag = m->m_pkthdr.ether_vtag;
4443 }
4444#else
4445 {
4446 struct m_tag *mtag;
4447
4448 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
4449 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4450 vlan_tag = VLAN_TAG_VALUE(mtag);
4451 }
4452 }
4453#endif
4454 for (i = 0; ; i++) {
4455 d = &sc->bge_ldata.bge_tx_ring[idx];
4456 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4457 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4458 d->bge_len = segs[i].ds_len;
4459 d->bge_flags = csum_flags;
4460 d->bge_vlan_tag = vlan_tag;
4461 d->bge_mss = mss;
4462 if (i == nsegs - 1)
4463 break;
4464 BGE_INC(idx, BGE_TX_RING_CNT);
4465 }
4466
4467 /* Mark the last segment as end of packet... */
4468 d->bge_flags |= BGE_TXBDFLAG_END;
4469
4470 /*
4471 * Ensure that the map for this transmission
4472 * is placed at the array index of the last descriptor
4473 * in this chain.
4474 */
4475 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4476 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4477 sc->bge_cdata.bge_tx_chain[idx] = m;
4478 sc->bge_txcnt += nsegs;
4479
4480 BGE_INC(idx, BGE_TX_RING_CNT);
4481 *txidx = idx;
4482
4483 return (0);
4484}
4485
4486/*
4487 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4488 * to the mbuf data regions directly in the transmit descriptors.
4489 */
4490static void
4491bge_start_locked(struct ifnet *ifp)
4492{
4493 struct bge_softc *sc;
4494 struct mbuf *m_head;
4495 uint32_t prodidx;
4496 int count;
4497
4498 sc = ifp->if_softc;
4499 BGE_LOCK_ASSERT(sc);
4500
4501 if (!sc->bge_link ||
4502 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4503 IFF_DRV_RUNNING)
4504 return;
4505
4506 prodidx = sc->bge_tx_prodidx;
4507
4508 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4509 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4510 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4511 break;
4512 }
4513 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4514 if (m_head == NULL)
4515 break;
4516
4517 /*
4518 * XXX
4519 * The code inside the if() block is never reached since we
4520 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4521 * requests to checksum TCP/UDP in a fragmented packet.
4522 *
4523 * XXX
4524 * safety overkill. If this is a fragmented packet chain
4525 * with delayed TCP/UDP checksums, then only encapsulate
4526 * it if we have enough descriptors to handle the entire
4527 * chain at once.
4528 * (paranoia -- may not actually be needed)
4529 */
4530 if (m_head->m_flags & M_FIRSTFRAG &&
4531 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4532 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4533 m_head->m_pkthdr.csum_data + 16) {
4534 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4535 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4536 break;
4537 }
4538 }
4539
4540 /*
4541 * Pack the data into the transmit ring. If we
4542 * don't have room, set the OACTIVE flag and wait
4543 * for the NIC to drain the ring.
4544 */
4545 if (bge_encap(sc, &m_head, &prodidx)) {
4546 if (m_head == NULL)
4547 break;
4548 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4549 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4550 break;
4551 }
4552 ++count;
4553
4554 /*
4555 * If there's a BPF listener, bounce a copy of this frame
4556 * to him.
4557 */
4558#ifdef ETHER_BPF_MTAP
4559 ETHER_BPF_MTAP(ifp, m_head);
4560#else
4561 BPF_MTAP(ifp, m_head);
4562#endif
4563 }
4564
4565 if (count > 0) {
4566 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4567 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4568 /* Transmit. */
4569 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4570 /* 5700 b2 errata */
4571 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4572 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4573
4574 sc->bge_tx_prodidx = prodidx;
4575
4576 /*
4577 * Set a timeout in case the chip goes out to lunch.
4578 */
4579 sc->bge_timer = 5;
4580 }
4581}
4582
4583/*
4584 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4585 * to the mbuf data regions directly in the transmit descriptors.
4586 */
4587static void
4588bge_start(struct ifnet *ifp)
4589{
4590 struct bge_softc *sc;
4591
4592 sc = ifp->if_softc;
4593 BGE_LOCK(sc);
4594 bge_start_locked(ifp);
4595 BGE_UNLOCK(sc);
4596}
4597
4598static void
4599bge_init_locked(struct bge_softc *sc)
4600{
4601 struct ifnet *ifp;
4602 uint16_t *m;
4603 uint32_t mode;
4604
4605 BGE_LOCK_ASSERT(sc);
4606
4607 ifp = sc->bge_ifp;
4608
4609 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4610 return;
4611
4612 /* Cancel pending I/O and flush buffers. */
4613 bge_stop(sc);
4614
4615 bge_stop_fw(sc);
4616 bge_sig_pre_reset(sc, BGE_RESET_START);
4617 bge_reset(sc);
4618 bge_sig_legacy(sc, BGE_RESET_START);
4619 bge_sig_post_reset(sc, BGE_RESET_START);
4620
4621 bge_chipinit(sc);
4622
4623 /*
4624 * Init the various state machines, ring
4625 * control blocks and firmware.
4626 */
4627 if (bge_blockinit(sc)) {
4628 device_printf(sc->bge_dev, "initialization failure\n");
4629 return;
4630 }
4631
4632 ifp = sc->bge_ifp;
4633
4634 /* Specify MTU. */
4635 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4636 ETHER_HDR_LEN + ETHER_CRC_LEN +
4637 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4638
4639 /* Load our MAC address. */
4640 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4641 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4642 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4643
4644 /* Program promiscuous mode. */
4645 bge_setpromisc(sc);
4646
4647 /* Program multicast filter. */
4648 bge_setmulti(sc);
4649
4650 /* Program VLAN tag stripping. */
4651 bge_setvlan(sc);
4652
4653 /* Override UDP checksum offloading. */
4654 if (sc->bge_forced_udpcsum == 0)
4655 sc->bge_csum_features &= ~CSUM_UDP;
4656 else
4657 sc->bge_csum_features |= CSUM_UDP;
4658 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4659 ifp->if_capenable & IFCAP_TXCSUM) {
4660 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4661 ifp->if_hwassist |= sc->bge_csum_features;
4662 }
4663
4664 /* Init RX ring. */
4665 if (bge_init_rx_ring_std(sc) != 0) {
4666 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4667 bge_stop(sc);
4668 return;
4669 }
4670
4671 /*
4672 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4673 * memory to ensure that the chip has in fact read the first
4674 * entry of the ring.
4675 */
4676 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4677 uint32_t v, i;
4678 for (i = 0; i < 10; i++) {
4679 DELAY(20);
4680 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4681 if (v == (MCLBYTES - ETHER_ALIGN))
4682 break;
4683 }
4684 if (i == 10)
4685 device_printf (sc->bge_dev,
4686 "5705 A0 chip failed to load RX ring\n");
4687 }
4688
4689 /* Init jumbo RX ring. */
4690 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4691 (MCLBYTES - ETHER_ALIGN)) {
4692 if (bge_init_rx_ring_jumbo(sc) != 0) {
4693 device_printf(sc->bge_dev,
4694 "no memory for jumbo Rx buffers.\n");
4695 bge_stop(sc);
4696 return;
4697 }
4698 }
4699
4700 /* Init our RX return ring index. */
4701 sc->bge_rx_saved_considx = 0;
4702
4703 /* Init our RX/TX stat counters. */
4704 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4705
4706 /* Init TX ring. */
4707 bge_init_tx_ring(sc);
4708
4709 /* Enable TX MAC state machine lockup fix. */
4710 mode = CSR_READ_4(sc, BGE_TX_MODE);
4711 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4712 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4713 /* Turn on transmitter. */
4714 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4715
4716 /* Turn on receiver. */
4717 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4718
4719 /*
4720 * Set the number of good frames to receive after RX MBUF
4721 * Low Watermark has been reached. After the RX MAC receives
4722 * this number of frames, it will drop subsequent incoming
4723 * frames until the MBUF High Watermark is reached.
4724 */
4725 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4726
4727 /* Clear MAC statistics. */
4728 if (BGE_IS_5705_PLUS(sc))
4729 bge_stats_clear_regs(sc);
4730
4731 /* Tell firmware we're alive. */
4732 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4733
4734#ifdef DEVICE_POLLING
4735 /* Disable interrupts if we are polling. */
4736 if (ifp->if_capenable & IFCAP_POLLING) {
4737 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4738 BGE_PCIMISCCTL_MASK_PCI_INTR);
4739 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4740 } else
4741#endif
4742
4743 /* Enable host interrupts. */
4744 {
4745 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4746 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4747 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4748 }
4749
4750 bge_ifmedia_upd_locked(ifp);
4751
4752 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4753 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4754
4755 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4756}
4757
4758static void
4759bge_init(void *xsc)
4760{
4761 struct bge_softc *sc = xsc;
4762
4763 BGE_LOCK(sc);
4764 bge_init_locked(sc);
4765 BGE_UNLOCK(sc);
4766}
4767
4768/*
4769 * Set media options.
4770 */
4771static int
4772bge_ifmedia_upd(struct ifnet *ifp)
4773{
4774 struct bge_softc *sc = ifp->if_softc;
4775 int res;
4776
4777 BGE_LOCK(sc);
4778 res = bge_ifmedia_upd_locked(ifp);
4779 BGE_UNLOCK(sc);
4780
4781 return (res);
4782}
4783
4784static int
4785bge_ifmedia_upd_locked(struct ifnet *ifp)
4786{
4787 struct bge_softc *sc = ifp->if_softc;
4788 struct mii_data *mii;
4789 struct mii_softc *miisc;
4790 struct ifmedia *ifm;
4791
4792 BGE_LOCK_ASSERT(sc);
4793
4794 ifm = &sc->bge_ifmedia;
4795
4796 /* If this is a 1000baseX NIC, enable the TBI port. */
4797 if (sc->bge_flags & BGE_FLAG_TBI) {
4798 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4799 return (EINVAL);
4800 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4801 case IFM_AUTO:
4802 /*
4803 * The BCM5704 ASIC appears to have a special
4804 * mechanism for programming the autoneg
4805 * advertisement registers in TBI mode.
4806 */
4807 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4808 uint32_t sgdig;
4809 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4810 if (sgdig & BGE_SGDIGSTS_DONE) {
4811 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4812 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4813 sgdig |= BGE_SGDIGCFG_AUTO |
4814 BGE_SGDIGCFG_PAUSE_CAP |
4815 BGE_SGDIGCFG_ASYM_PAUSE;
4816 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4817 sgdig | BGE_SGDIGCFG_SEND);
4818 DELAY(5);
4819 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4820 }
4821 }
4822 break;
4823 case IFM_1000_SX:
4824 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4825 BGE_CLRBIT(sc, BGE_MAC_MODE,
4826 BGE_MACMODE_HALF_DUPLEX);
4827 } else {
4828 BGE_SETBIT(sc, BGE_MAC_MODE,
4829 BGE_MACMODE_HALF_DUPLEX);
4830 }
4831 break;
4832 default:
4833 return (EINVAL);
4834 }
4835 return (0);
4836 }
4837
4838 sc->bge_link_evt++;
4839 mii = device_get_softc(sc->bge_miibus);
4840 if (mii->mii_instance)
4841 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4842 mii_phy_reset(miisc);
4843 mii_mediachg(mii);
4844
4845 /*
4846 * Force an interrupt so that we will call bge_link_upd
4847 * if needed and clear any pending link state attention.
4848 * Without this we would not get any further interrupts
4849 * for link state changes, so the link would never come UP and
4850 * we would never be able to send in bge_start_locked(). The only
4851 * way to get things working otherwise was to receive a packet and
4852 * get an RX interrupt.
4853 * bge_tick() should help for fiber cards, and we might not
4854 * need to do this here if BGE_FLAG_TBI is set, but since
4855 * we poll for fiber anyway it should not hurt.
4856 */
4857 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4858 sc->bge_flags & BGE_FLAG_5788)
4859 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4860 else
4861 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4862
4863 return (0);
4864}
4865
4866/*
4867 * Report current media status.
4868 */
4869static void
4870bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4871{
4872 struct bge_softc *sc = ifp->if_softc;
4873 struct mii_data *mii;
4874
4875 BGE_LOCK(sc);
4876
4877 if (sc->bge_flags & BGE_FLAG_TBI) {
4878 ifmr->ifm_status = IFM_AVALID;
4879 ifmr->ifm_active = IFM_ETHER;
4880 if (CSR_READ_4(sc, BGE_MAC_STS) &
4881 BGE_MACSTAT_TBI_PCS_SYNCHED)
4882 ifmr->ifm_status |= IFM_ACTIVE;
4883 else {
4884 ifmr->ifm_active |= IFM_NONE;
4885 BGE_UNLOCK(sc);
4886 return;
4887 }
4888 ifmr->ifm_active |= IFM_1000_SX;
4889 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4890 ifmr->ifm_active |= IFM_HDX;
4891 else
4892 ifmr->ifm_active |= IFM_FDX;
4893 BGE_UNLOCK(sc);
4894 return;
4895 }
4896
4897 mii = device_get_softc(sc->bge_miibus);
4898 mii_pollstat(mii);
4899 ifmr->ifm_active = mii->mii_media_active;
4900 ifmr->ifm_status = mii->mii_media_status;
4901
4902 BGE_UNLOCK(sc);
4903}
4904
4905static int
4906bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4907{
4908 struct bge_softc *sc = ifp->if_softc;
4909 struct ifreq *ifr = (struct ifreq *) data;
4910 struct mii_data *mii;
4911 int flags, mask, error = 0;
4912
4913 switch (command) {
4914 case SIOCSIFMTU:
4915 BGE_LOCK(sc);
4916 if (ifr->ifr_mtu < ETHERMIN ||
4917 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4918 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4919 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4920 ifr->ifr_mtu > ETHERMTU))
4921 error = EINVAL;
4922 else if (ifp->if_mtu != ifr->ifr_mtu) {
4923 ifp->if_mtu = ifr->ifr_mtu;
4924 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4925 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4926 bge_init_locked(sc);
4927 }
4928 }
4929 BGE_UNLOCK(sc);
4930 break;
4931 case SIOCSIFFLAGS:
4932 BGE_LOCK(sc);
4933 if (ifp->if_flags & IFF_UP) {
4934 /*
4935 * If only the state of the PROMISC flag changed,
4936 * then just use the 'set promisc mode' command
4937 * instead of reinitializing the entire NIC. Doing
4938 * a full re-init means reloading the firmware and
4939 * waiting for it to start up, which may take a
4940 * second or two. Similarly for ALLMULTI.
4941 */
4942 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4943 flags = ifp->if_flags ^ sc->bge_if_flags;
4944 if (flags & IFF_PROMISC)
4945 bge_setpromisc(sc);
4946 if (flags & IFF_ALLMULTI)
4947 bge_setmulti(sc);
4948 } else
4949 bge_init_locked(sc);
4950 } else {
4951 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4952 bge_stop(sc);
4953 }
4954 }
4955 sc->bge_if_flags = ifp->if_flags;
4956 BGE_UNLOCK(sc);
4957 error = 0;
4958 break;
4959 case SIOCADDMULTI:
4960 case SIOCDELMULTI:
4961 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4962 BGE_LOCK(sc);
4963 bge_setmulti(sc);
4964 BGE_UNLOCK(sc);
4965 error = 0;
4966 }
4967 break;
4968 case SIOCSIFMEDIA:
4969 case SIOCGIFMEDIA:
4970 if (sc->bge_flags & BGE_FLAG_TBI) {
4971 error = ifmedia_ioctl(ifp, ifr,
4972 &sc->bge_ifmedia, command);
4973 } else {
4974 mii = device_get_softc(sc->bge_miibus);
4975 error = ifmedia_ioctl(ifp, ifr,
4976 &mii->mii_media, command);
4977 }
4978 break;
4979 case SIOCSIFCAP:
4980 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4981#ifdef DEVICE_POLLING
4982 if (mask & IFCAP_POLLING) {
4983 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4984 error = ether_poll_register(bge_poll, ifp);
4985 if (error)
4986 return (error);
4987 BGE_LOCK(sc);
4988 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4989 BGE_PCIMISCCTL_MASK_PCI_INTR);
4990 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4991 ifp->if_capenable |= IFCAP_POLLING;
4992 BGE_UNLOCK(sc);
4993 } else {
4994 error = ether_poll_deregister(ifp);
4995 /* Enable interrupt even in error case */
4996 BGE_LOCK(sc);
4997 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
4998 BGE_PCIMISCCTL_MASK_PCI_INTR);
4999 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5000 ifp->if_capenable &= ~IFCAP_POLLING;
5001 BGE_UNLOCK(sc);
5002 }
5003 }
5004#endif
5005 if ((mask & IFCAP_TXCSUM) != 0 &&
5006 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5007 ifp->if_capenable ^= IFCAP_TXCSUM;
5008 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5009 ifp->if_hwassist |= sc->bge_csum_features;
5010 else
5011 ifp->if_hwassist &= ~sc->bge_csum_features;
5012 }
5013
5014 if ((mask & IFCAP_RXCSUM) != 0 &&
5015 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5016 ifp->if_capenable ^= IFCAP_RXCSUM;
5017
5018 if ((mask & IFCAP_TSO4) != 0 &&
5019 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5020 ifp->if_capenable ^= IFCAP_TSO4;
5021 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5022 ifp->if_hwassist |= CSUM_TSO;
5023 else
5024 ifp->if_hwassist &= ~CSUM_TSO;
5025 }
5026
5027 if (mask & IFCAP_VLAN_MTU) {
5028 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5029 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5030 bge_init(sc);
5031 }
5032
5033 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5034 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5035 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5036 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5037 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5038 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5039 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5040 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5041 BGE_LOCK(sc);
5042 bge_setvlan(sc);
5043 BGE_UNLOCK(sc);
5044 }
5045#ifdef VLAN_CAPABILITIES
5046 VLAN_CAPABILITIES(ifp);
5047#endif
5048 break;
5049 default:
5050 error = ether_ioctl(ifp, command, data);
5051 break;
5052 }
5053
5054 return (error);
5055}
5056
5057static void
5058bge_watchdog(struct bge_softc *sc)
5059{
5060 struct ifnet *ifp;
5061
5062 BGE_LOCK_ASSERT(sc);
5063
5064 if (sc->bge_timer == 0 || --sc->bge_timer)
5065 return;
5066
5067 ifp = sc->bge_ifp;
5068
5069 if_printf(ifp, "watchdog timeout -- resetting\n");
5070
5071 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5072 bge_init_locked(sc);
5073
5074 ifp->if_oerrors++;
5075}
5076
5077/*
5078 * Stop the adapter and free any mbufs allocated to the
5079 * RX and TX lists.
5080 */
5081static void
5082bge_stop(struct bge_softc *sc)
5083{
5084 struct ifnet *ifp;
5085
5086 BGE_LOCK_ASSERT(sc);
5087
5088 ifp = sc->bge_ifp;
5089
5090 callout_stop(&sc->bge_stat_ch);
5091
5092 /* Disable host interrupts. */
5093 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5094 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5095
5096 /*
5097 * Tell firmware we're shutting down.
5098 */
5099 bge_stop_fw(sc);
5100 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5101
5102 /*
5103 * Disable all of the receiver blocks.
5104 */
5105 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5106 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5107 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5108 if (!(BGE_IS_5705_PLUS(sc)))
5109 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5110 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5111 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5112 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5113
5114 /*
5115 * Disable all of the transmit blocks.
5116 */
5117 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5118 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5119 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5120 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5121 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5122 if (!(BGE_IS_5705_PLUS(sc)))
5123 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5124 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5125
5126 /*
5127 * Shut down all of the memory managers and related
5128 * state machines.
5129 */
5130 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5131 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5132 if (!(BGE_IS_5705_PLUS(sc)))
5133 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5134 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5135 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5136 if (!(BGE_IS_5705_PLUS(sc))) {
5137 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5138 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5139 }
5140 /* Update MAC statistics. */
5141 if (BGE_IS_5705_PLUS(sc))
5142 bge_stats_update_regs(sc);
5143
5144 bge_reset(sc);
5145 bge_sig_legacy(sc, BGE_RESET_STOP);
5146 bge_sig_post_reset(sc, BGE_RESET_STOP);
5147
5148 /*
5149 * Keep the ASF firmware running if up.
5150 */
5151 if (sc->bge_asf_mode & ASF_STACKUP)
5152 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5153 else
5154 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5155
5156 /* Free the RX lists. */
5157 bge_free_rx_ring_std(sc);
5158
5159 /* Free jumbo RX list. */
5160 if (BGE_IS_JUMBO_CAPABLE(sc))
5161 bge_free_rx_ring_jumbo(sc);
5162
5163 /* Free TX buffers. */
5164 bge_free_tx_ring(sc);
5165
5166 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5167
5168 /* Clear MAC's link state (PHY may still have link UP). */
5169 if (bootverbose && sc->bge_link)
5170 if_printf(sc->bge_ifp, "link DOWN\n");
5171 sc->bge_link = 0;
5172
5173 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5174}
5175
5176/*
5177 * Stop all chip I/O so that the kernel's probe routines don't
5178 * get confused by errant DMAs when rebooting.
5179 */
5180static int
5181bge_shutdown(device_t dev)
5182{
5183 struct bge_softc *sc;
5184
5185 sc = device_get_softc(dev);
5186 BGE_LOCK(sc);
5187 bge_stop(sc);
5188 bge_reset(sc);
5189 BGE_UNLOCK(sc);
5190
5191 return (0);
5192}
5193
5194static int
5195bge_suspend(device_t dev)
5196{
5197 struct bge_softc *sc;
5198
5199 sc = device_get_softc(dev);
5200 BGE_LOCK(sc);
5201 bge_stop(sc);
5202 BGE_UNLOCK(sc);
5203
5204 return (0);
5205}
5206
5207static int
5208bge_resume(device_t dev)
5209{
5210 struct bge_softc *sc;
5211 struct ifnet *ifp;
5212
5213 sc = device_get_softc(dev);
5214 BGE_LOCK(sc);
5215 ifp = sc->bge_ifp;
5216 if (ifp->if_flags & IFF_UP) {
5217 bge_init_locked(sc);
5218 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5219 bge_start_locked(ifp);
5220 }
5221 BGE_UNLOCK(sc);
5222
5223 return (0);
5224}
5225
5226static void
5227bge_link_upd(struct bge_softc *sc)
5228{
5229 struct mii_data *mii;
5230 uint32_t link, status;
5231
5232 BGE_LOCK_ASSERT(sc);
5233
5234 /* Clear 'pending link event' flag. */
5235 sc->bge_link_evt = 0;
5236
5237 /*
5238 * Process link state changes.
5239 * Grrr. The link status word in the status block does
5240 * not work correctly on the BCM5700 rev AX and BX chips,
5241 * according to all available information. Hence, we have
5242 * to enable MII interrupts in order to properly obtain
5243 * async link changes. Unfortunately, this also means that
5244 * we have to read the MAC status register to detect link
5245 * changes, thereby adding an additional register access to
5246 * the interrupt handler.
5247 *
5248 * XXX: perhaps link state detection procedure used for
5249 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5250 */
5251
5252 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5253 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5254 status = CSR_READ_4(sc, BGE_MAC_STS);
5255 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5256 mii = device_get_softc(sc->bge_miibus);
5257 mii_pollstat(mii);
5258 if (!sc->bge_link &&
5259 mii->mii_media_status & IFM_ACTIVE &&
5260 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5261 sc->bge_link++;
5262 if (bootverbose)
5263 if_printf(sc->bge_ifp, "link UP\n");
5264 } else if (sc->bge_link &&
5265 (!(mii->mii_media_status & IFM_ACTIVE) ||
5266 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5267 sc->bge_link = 0;
5268 if (bootverbose)
5269 if_printf(sc->bge_ifp, "link DOWN\n");
5270 }
5271
5272 /* Clear the interrupt. */
5273 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5274 BGE_EVTENB_MI_INTERRUPT);
5275 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5276 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5277 BRGPHY_INTRS);
5278 }
5279 return;
5280 }
5281
5282 if (sc->bge_flags & BGE_FLAG_TBI) {
5283 status = CSR_READ_4(sc, BGE_MAC_STS);
5284 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5285 if (!sc->bge_link) {
5286 sc->bge_link++;
5287 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5288 BGE_CLRBIT(sc, BGE_MAC_MODE,
5289 BGE_MACMODE_TBI_SEND_CFGS);
5290 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5291 if (bootverbose)
5292 if_printf(sc->bge_ifp, "link UP\n");
5293 if_link_state_change(sc->bge_ifp,
5294 LINK_STATE_UP);
5295 }
5296 } else if (sc->bge_link) {
5297 sc->bge_link = 0;
5298 if (bootverbose)
5299 if_printf(sc->bge_ifp, "link DOWN\n");
5300 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5301 }
5302 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5303 /*
5304 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED bit
5305 * in the status word always set. Work around this bug by reading
5306 * the PHY link status directly.
5307 */
5308 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5309
5310 if (link != sc->bge_link ||
5311 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5312 mii = device_get_softc(sc->bge_miibus);
5313 mii_pollstat(mii);
5314 if (!sc->bge_link &&
5315 mii->mii_media_status & IFM_ACTIVE &&
5316 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5317 sc->bge_link++;
5318 if (bootverbose)
5319 if_printf(sc->bge_ifp, "link UP\n");
5320 } else if (sc->bge_link &&
5321 (!(mii->mii_media_status & IFM_ACTIVE) ||
5322 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5323 sc->bge_link = 0;
5324 if (bootverbose)
5325 if_printf(sc->bge_ifp, "link DOWN\n");
5326 }
5327 }
5328 } else {
5329 /*
5330 * For controllers that call mii_tick, we have to poll
5331 * link status.
5332 */
5333 mii = device_get_softc(sc->bge_miibus);
5334 mii_pollstat(mii);
5335 bge_miibus_statchg(sc->bge_dev);
5336 }
5337
5338 /* Clear the attention. */
5339 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5340 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5341 BGE_MACSTAT_LINK_CHANGED);
5342}
5343
5344static void
5345bge_add_sysctls(struct bge_softc *sc)
5346{
5347 struct sysctl_ctx_list *ctx;
5348 struct sysctl_oid_list *children;
5349 char tn[32];
5350 int unit;
5351
5352 ctx = device_get_sysctl_ctx(sc->bge_dev);
5353 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5354
5355#ifdef BGE_REGISTER_DEBUG
5356 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5357 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5358 "Debug Information");
5359
5360 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5361 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5362 "Register Read");
5363
5364 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5365 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5366 "Memory Read");
5367
5368#endif
5369
5370 unit = device_get_unit(sc->bge_dev);
5371 /*
5372 * A common design characteristic for many Broadcom client controllers
5373 * is that they only support a single outstanding DMA read operation
5374 * on the PCIe bus. This means that it will take twice as long to fetch
5375 * a TX frame that is split into header and payload buffers as it does
5376 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5377 * these controllers, coalescing buffers to reduce the number of memory
5378 * reads is an effective way to get maximum performance (about 940 Mbps).
5379 * Without collapsing TX buffers the maximum TCP bulk transfer
5380 * performance is about 850 Mbps. However, forcibly coalescing mbufs
5381 * consumes a lot of CPU cycles, so leave it off by default.
5382 */
5383 sc->bge_forced_collapse = 0;
5384 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5385 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5386 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5387 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5388 "Number of fragmented TX buffers of a frame allowed before "
5389 "forced collapsing");
5390
5391 /*
5392 * It seems all Broadcom controllers have a bug that can generate UDP
5393 * datagrams with checksum value 0 when TX UDP checksum offloading is
5394 * enabled. Generating a UDP checksum value of 0 is an RFC 768 violation.
5395 * Even though the probability of generating such UDP datagrams is
5396 * low, I don't want to see FreeBSD boxes injecting such datagrams
5397 * into the network, so disable UDP checksum offloading by default. Users
5398 * can still override this behavior by setting a sysctl variable,
5399 * dev.bge.0.forced_udpcsum.
5400 */
5401 sc->bge_forced_udpcsum = 0;
5402 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5403 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5404 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5405 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5406 "Enable UDP checksum offloading even if controller can "
5407 "generate UDP checksum value 0");
5408
5409 if (BGE_IS_5705_PLUS(sc))
5410 bge_add_sysctl_stats_regs(sc, ctx, children);
5411 else
5412 bge_add_sysctl_stats(sc, ctx, children);
5413}
5414
5415#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5416 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5417 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5418 desc)
5419
5420static void
5421bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5422 struct sysctl_oid_list *parent)
5423{
5424 struct sysctl_oid *tree;
5425 struct sysctl_oid_list *children, *schildren;
5426
5427 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5428 NULL, "BGE Statistics");
5429 schildren = children = SYSCTL_CHILDREN(tree);
5430 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5431 children, COSFramesDroppedDueToFilters,
5432 "FramesDroppedDueToFilters");
5433 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5434 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5435 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5436 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5437 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5438 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5439 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5440 children, ifInDiscards, "InputDiscards");
5441 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5442 children, ifInErrors, "InputErrors");
5443 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5444 children, nicRecvThresholdHit, "RecvThresholdHit");
5445 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5446 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5447 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5448 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5449 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5450 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5451 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5452 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5453 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5454 children, nicRingStatusUpdate, "RingStatusUpdate");
5455 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5456 children, nicInterrupts, "Interrupts");
5457 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5458 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5459 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5460 children, nicSendThresholdHit, "SendThresholdHit");
5461
5462 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5463 NULL, "BGE RX Statistics");
5464 children = SYSCTL_CHILDREN(tree);
5465 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5466 children, rxstats.ifHCInOctets, "ifHCInOctets");
5467 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5468 children, rxstats.etherStatsFragments, "Fragments");
5469 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5470 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5471 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5472 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5473 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5474 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5475 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5476 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5477 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5478 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5479 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5480 children, rxstats.xoffPauseFramesReceived,
5481 "xoffPauseFramesReceived");
5482 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5483 children, rxstats.macControlFramesReceived,
5484 "ControlFramesReceived");
5485 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5486 children, rxstats.xoffStateEntered, "xoffStateEntered");
5487 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5488 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5489 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5490 children, rxstats.etherStatsJabbers, "Jabbers");
5491 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5492 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5493 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5494 children, rxstats.inRangeLengthError, "inRangeLengthError");
5495 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5496 children, rxstats.outRangeLengthError, "outRangeLengthError");
5497
5498 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5499 NULL, "BGE TX Statistics");
5500 children = SYSCTL_CHILDREN(tree);
5501 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5502 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5503 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5504 children, txstats.etherStatsCollisions, "Collisions");
5505 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5506 children, txstats.outXonSent, "XonSent");
5507 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5508 children, txstats.outXoffSent, "XoffSent");
5509 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5510 children, txstats.flowControlDone, "flowControlDone");
5511 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5512 children, txstats.dot3StatsInternalMacTransmitErrors,
5513 "InternalMacTransmitErrors");
5514 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5515 children, txstats.dot3StatsSingleCollisionFrames,
5516 "SingleCollisionFrames");
5517 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5518 children, txstats.dot3StatsMultipleCollisionFrames,
5519 "MultipleCollisionFrames");
5520 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5521 children, txstats.dot3StatsDeferredTransmissions,
5522 "DeferredTransmissions");
5523 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5524 children, txstats.dot3StatsExcessiveCollisions,
5525 "ExcessiveCollisions");
5526 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5527 children, txstats.dot3StatsLateCollisions,
5528 "LateCollisions");
5529 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5530 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5531 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5532 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5533 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5534 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5535 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5536 children, txstats.dot3StatsCarrierSenseErrors,
5537 "CarrierSenseErrors");
5538 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5539 children, txstats.ifOutDiscards, "Discards");
5540 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5541 children, txstats.ifOutErrors, "Errors");
5542}
5543
5544#undef BGE_SYSCTL_STAT
5545
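/*
 * BGE_SYSCTL_STAT_ADD64() exports one of the 64-bit software counters kept
 * in struct bge_mac_stats (accumulated by bge_stats_update_regs()) as a
 * read-only quad sysctl.
 */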
5546#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5547 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5548
5549static void
5550bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5551 struct sysctl_oid_list *parent)
5552{
5553 struct sysctl_oid *tree;
5554 struct sysctl_oid_list *child, *schild;
5555 struct bge_mac_stats *stats;
5556
5557 stats = &sc->bge_mac_stats;
5558 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5559 NULL, "BGE Statistics");
5560 schild = child = SYSCTL_CHILDREN(tree);
5561 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5562 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5563 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5564 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5565 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5566 &stats->DmaWriteHighPriQueueFull,
5567 "NIC DMA Write High Priority Queue Full");
5568 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5569 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5570 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5571 &stats->InputDiscards, "Discarded Input Frames");
5572 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5573 &stats->InputErrors, "Input Errors");
5574 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5575 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5576
5577 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5578 NULL, "BGE RX Statistics");
5579 child = SYSCTL_CHILDREN(tree);
5580 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5581 &stats->ifHCInOctets, "Inbound Octets");
5582 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5583 &stats->etherStatsFragments, "Fragments");
5584 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5585 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5586 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5587 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5588 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5589 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5590 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5591 &stats->dot3StatsFCSErrors, "FCS Errors");
5592 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5593 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5594 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5595 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5596 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5597 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5598 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5599 &stats->macControlFramesReceived, "MAC Control Frames Received");
5600 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5601 &stats->xoffStateEntered, "XOFF State Entered");
5602 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5603 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5604 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5605 &stats->etherStatsJabbers, "Jabbers");
5606 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5607 &stats->etherStatsUndersizePkts, "Undersized Packets");
5608
5609 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5610 NULL, "BGE TX Statistics");
5611 child = SYSCTL_CHILDREN(tree);
5612 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5613 &stats->ifHCOutOctets, "Outbound Octets");
5614 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5615 &stats->etherStatsCollisions, "TX Collisions");
5616 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5617 &stats->outXonSent, "XON Sent");
5618 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5619 &stats->outXoffSent, "XOFF Sent");
5620 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5621 &stats->dot3StatsInternalMacTransmitErrors,
5622 "Internal MAC TX Errors");
5623 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5624 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5625 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5626 &stats->dot3StatsMultipleCollisionFrames,
5627 "Multiple Collision Frames");
5628 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5629 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5630 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5631 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5632 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5633 &stats->dot3StatsLateCollisions, "Late Collisions");
5634 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5635 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5636 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5637 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5638 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5639 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5640}
5641
5642#undef BGE_SYSCTL_STAT_ADD64
5643
5644static int
5645bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5646{
5647 struct bge_softc *sc;
5648 uint32_t result;
5649 int offset;
5650
5651 sc = (struct bge_softc *)arg1;
5652 offset = arg2;
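	/*
	 * Fetch the counter's bge_addr_lo word from the NIC statistics
	 * block through the PCI memory window; arg2 is the counter's
	 * offset within struct bge_stats.
	 */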
5653 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5654 offsetof(bge_hostaddr, bge_addr_lo));
5655 return (sysctl_handle_int(oidp, &result, 0, req));
5656}
5657
5658#ifdef BGE_REGISTER_DEBUG
5659static int
5660bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5661{
5662 struct bge_softc *sc;
5663 uint16_t *sbdata;
5664 int error;
5665 int result;
5666 int i, j;
5667
5668 result = -1;
5669 error = sysctl_handle_int(oidp, &result, 0, req);
5670 if (error || (req->newptr == NULL))
5671 return (error);
5672
5673 if (result == 1) {
5674 sc = (struct bge_softc *)arg1;
5675
5676 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5677 printf("Status Block:\n");
5678 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
5679 printf("%06x:", i);
5680 for (j = 0; j < 8; j++) {
5681 printf(" %04x", sbdata[i]);
5682 i += 4;
5683 }
5684 printf("\n");
5685 }
5686
5687 printf("Registers:\n");
5688 for (i = 0x800; i < 0xA00; ) {
5689 printf("%06x:", i);
5690 for (j = 0; j < 8; j++) {
5691 printf(" %08x", CSR_READ_4(sc, i));
5692 i += 4;
5693 }
5694 printf("\n");
5695 }
5696
5697 printf("Hardware Flags:\n");
5698 if (BGE_IS_5755_PLUS(sc))
5699 printf(" - 5755 Plus\n");
5700 if (BGE_IS_575X_PLUS(sc))
5701 printf(" - 575X Plus\n");
5702 if (BGE_IS_5705_PLUS(sc))
5703 printf(" - 5705 Plus\n");
5704 if (BGE_IS_5714_FAMILY(sc))
5705 printf(" - 5714 Family\n");
5706 if (BGE_IS_5700_FAMILY(sc))
5707 printf(" - 5700 Family\n");
5708 if (sc->bge_flags & BGE_FLAG_JUMBO)
5709 printf(" - Supports Jumbo Frames\n");
5710 if (sc->bge_flags & BGE_FLAG_PCIX)
5711 printf(" - PCI-X Bus\n");
5712 if (sc->bge_flags & BGE_FLAG_PCIE)
5713 printf(" - PCI Express Bus\n");
5714 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5715 printf(" - No 3 LEDs\n");
5716 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5717 printf(" - RX Alignment Bug\n");
5718 }
5719
5720 return (error);
5721}
5722
5723static int
5724bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5725{
5726 struct bge_softc *sc;
5727 int error;
5728 uint16_t result;
5729 uint32_t val;
5730
5731 result = -1;
5732 error = sysctl_handle_int(oidp, &result, 0, req);
5733 if (error || (req->newptr == NULL))
5734 return (error);
5735
5736 if (result < 0x8000) {
5737 sc = (struct bge_softc *)arg1;
5738 val = CSR_READ_4(sc, result);
5739 printf("reg 0x%06X = 0x%08X\n", result, val);
5740 }
5741
5742 return (error);
5743}
5744
5745static int
5746bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5747{
5748 struct bge_softc *sc;
5749 int error;
5750 uint16_t result;
5751 uint32_t val;
5752
5753 result = -1;
5754 error = sysctl_handle_int(oidp, &result, 0, req);
5755 if (error || (req->newptr == NULL))
5756 return (error);
5757
5758 if (result < 0x8000) {
5759 sc = (struct bge_softc *)arg1;
5760 val = bge_readmem_ind(sc, result);
5761 printf("mem 0x%06X = 0x%08X\n", result, val);
5762 }
5763
5764 return (error);
5765}
5766#endif
5767
5768static int
5769bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5770{
5771
5772 if (sc->bge_flags & BGE_FLAG_EADDR)
5773 return (1);
5774
5775#ifdef __sparc64__
5776 OF_getetheraddr(sc->bge_dev, ether_addr);
5777 return (0);
5778#endif
5779 return (1);
5780}
5781
5782static int
5783bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5784{
5785 uint32_t mac_addr;
5786
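	/*
	 * Look for a station address in NIC memory at 0x0c14/0x0c18.
	 * The 0x484b value in the upper 16 bits (apparently an ASCII
	 * "HK" signature) indicates that a valid address is present.
	 */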
5787 mac_addr = bge_readmem_ind(sc, 0x0c14);
5788 if ((mac_addr >> 16) == 0x484b) {
5789 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5790 ether_addr[1] = (uint8_t)mac_addr;
5791 mac_addr = bge_readmem_ind(sc, 0x0c18);
5792 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5793 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5794 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5795 ether_addr[5] = (uint8_t)mac_addr;
5796 return (0);
5797 }
5798 return (1);
5799}
5800
5801static int
5802bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5803{
5804 int mac_offset = BGE_EE_MAC_OFFSET;
5805
5806 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5807 mac_offset = BGE_EE_MAC_OFFSET_5906;
5808
5809 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5810 ETHER_ADDR_LEN));
5811}
5812
5813static int
5814bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5815{
5816
5817 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5818 return (1);
5819
5820 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5821 ETHER_ADDR_LEN));
5822}
5823
5824static int
5825bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5826{
5827 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5828 /* NOTE: Order is critical */
5829 bge_get_eaddr_fw,
5830 bge_get_eaddr_mem,
5831 bge_get_eaddr_nvram,
5832 bge_get_eaddr_eeprom,
5833 NULL
5834 };
5835 const bge_eaddr_fcn_t *func;
5836
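	/*
	 * Try each source in order (firmware/OFW, NIC memory, NVRAM,
	 * EEPROM) and stop at the first one that succeeds.
	 */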
5837 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5838 if ((*func)(sc, eaddr) == 0)
5839 break;
5840 }
5841 return (*func == NULL ? ENXIO : 0);
5842}
3110 if (error != 0) {
3111 if (trys++ < 4) {
3112 device_printf(sc->bge_dev, "Try again\n");
3113 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3114 BMCR_RESET);
3115 goto again;
3116 }
3117 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3118 goto fail;
3119 }
3120
3121 /*
 3122		 * Now tell the firmware we are going up after probing the PHY.
3123 */
3124 if (sc->bge_asf_mode & ASF_STACKUP)
3125 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3126 }
3127
3128 /*
3129 * When using the BCM5701 in PCI-X mode, data corruption has
3130 * been observed in the first few bytes of some received packets.
3131 * Aligning the packet buffer in memory eliminates the corruption.
3132 * Unfortunately, this misaligns the packet payloads. On platforms
3133 * which do not support unaligned accesses, we will realign the
3134 * payloads by copying the received packets.
3135 */
3136 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3137 sc->bge_flags & BGE_FLAG_PCIX)
3138 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3139
3140 /*
3141 * Call MI attach routine.
3142 */
3143 ether_ifattach(ifp, eaddr);
3144 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3145
3146 /* Tell upper layer we support long frames. */
3147 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3148
3149 /*
3150 * Hookup IRQ last.
3151 */
3152#if __FreeBSD_version > 700030
3153 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3154 /* Take advantage of single-shot MSI. */
3155 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3156 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3157 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3158 taskqueue_thread_enqueue, &sc->bge_tq);
3159 if (sc->bge_tq == NULL) {
3160 device_printf(dev, "could not create taskqueue.\n");
3161 ether_ifdetach(ifp);
3162 error = ENXIO;
3163 goto fail;
3164 }
3165 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3166 device_get_nameunit(sc->bge_dev));
3167 error = bus_setup_intr(dev, sc->bge_irq,
3168 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3169 &sc->bge_intrhand);
3170 if (error)
3171 ether_ifdetach(ifp);
3172 } else
3173 error = bus_setup_intr(dev, sc->bge_irq,
3174 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3175 &sc->bge_intrhand);
3176#else
3177 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
3178 bge_intr, sc, &sc->bge_intrhand);
3179#endif
3180
3181 if (error) {
3182 bge_detach(dev);
3183 device_printf(sc->bge_dev, "couldn't set up irq\n");
3184 }
3185
3186 return (0);
3187
3188fail:
3189 bge_release_resources(sc);
3190
3191 return (error);
3192}
3193
3194static int
3195bge_detach(device_t dev)
3196{
3197 struct bge_softc *sc;
3198 struct ifnet *ifp;
3199
3200 sc = device_get_softc(dev);
3201 ifp = sc->bge_ifp;
3202
3203#ifdef DEVICE_POLLING
3204 if (ifp->if_capenable & IFCAP_POLLING)
3205 ether_poll_deregister(ifp);
3206#endif
3207
3208 BGE_LOCK(sc);
3209 bge_stop(sc);
3210 bge_reset(sc);
3211 BGE_UNLOCK(sc);
3212
3213 callout_drain(&sc->bge_stat_ch);
3214
3215 if (sc->bge_tq)
3216 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3217 ether_ifdetach(ifp);
3218
3219 if (sc->bge_flags & BGE_FLAG_TBI) {
3220 ifmedia_removeall(&sc->bge_ifmedia);
3221 } else {
3222 bus_generic_detach(dev);
3223 device_delete_child(dev, sc->bge_miibus);
3224 }
3225
3226 bge_release_resources(sc);
3227
3228 return (0);
3229}
3230
3231static void
3232bge_release_resources(struct bge_softc *sc)
3233{
3234 device_t dev;
3235
3236 dev = sc->bge_dev;
3237
3238 if (sc->bge_tq != NULL)
3239 taskqueue_free(sc->bge_tq);
3240
3241 if (sc->bge_intrhand != NULL)
3242 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3243
3244 if (sc->bge_irq != NULL)
3245 bus_release_resource(dev, SYS_RES_IRQ,
3246 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3247
3248 if (sc->bge_flags & BGE_FLAG_MSI)
3249 pci_release_msi(dev);
3250
3251 if (sc->bge_res != NULL)
3252 bus_release_resource(dev, SYS_RES_MEMORY,
3253 PCIR_BAR(0), sc->bge_res);
3254
3255 if (sc->bge_ifp != NULL)
3256 if_free(sc->bge_ifp);
3257
3258 bge_dma_free(sc);
3259
3260 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3261 BGE_LOCK_DESTROY(sc);
3262}
3263
3264static int
3265bge_reset(struct bge_softc *sc)
3266{
3267 device_t dev;
3268 uint32_t cachesize, command, pcistate, reset, val;
3269 void (*write_op)(struct bge_softc *, int, int);
3270 uint16_t devctl;
3271 int i;
3272
3273 dev = sc->bge_dev;
3274
3275 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3276 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3277 if (sc->bge_flags & BGE_FLAG_PCIE)
3278 write_op = bge_writemem_direct;
3279 else
3280 write_op = bge_writemem_ind;
3281 } else
3282 write_op = bge_writereg_ind;
3283
3284 /* Save some important PCI state. */
3285 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3286 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3287 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3288
3289 pci_write_config(dev, BGE_PCI_MISC_CTL,
3290 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3291 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3292
3293 /* Disable fastboot on controllers that support it. */
3294 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3295 BGE_IS_5755_PLUS(sc)) {
3296 if (bootverbose)
3297 device_printf(dev, "Disabling fastboot\n");
3298 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3299 }
3300
3301 /*
3302 * Write the magic number to SRAM at offset 0xB50.
3303 * When firmware finishes its initialization it will
3304 * write ~BGE_MAGIC_NUMBER to the same location.
3305 */
3306 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3307
3308 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3309
3310 /* XXX: Broadcom Linux driver. */
3311 if (sc->bge_flags & BGE_FLAG_PCIE) {
3312 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3313 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3314 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3315 /* Prevent PCIE link training during global reset */
3316 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3317 reset |= 1 << 29;
3318 }
3319 }
3320
3321 /*
 3322	 * Set GPHY Power Down Override to leave the GPHY
 3323	 * powered up while in the D0 uninitialized state.
3324 */
3325 if (BGE_IS_5705_PLUS(sc))
3326 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3327
3328 /* Issue global reset */
3329 write_op(sc, BGE_MISC_CFG, reset);
3330
3331 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3332 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3333 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3334 val | BGE_VCPU_STATUS_DRV_RESET);
3335 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3336 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3337 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3338 }
3339
3340 DELAY(1000);
3341
3342 /* XXX: Broadcom Linux driver. */
3343 if (sc->bge_flags & BGE_FLAG_PCIE) {
3344 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3345 DELAY(500000); /* wait for link training to complete */
3346 val = pci_read_config(dev, 0xC4, 4);
3347 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3348 }
3349 devctl = pci_read_config(dev,
3350 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
 3351		/* Clear the no snoop and relaxed ordering enable bits. */
3352 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3353 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3354 /* Set PCIE max payload size to 128. */
3355 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3356 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3357 devctl, 2);
3358 /* Clear error status. */
3359 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3360 PCIM_EXP_STA_CORRECTABLE_ERROR |
3361 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3362 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3363 }
3364
3365 /* Reset some of the PCI state that got zapped by reset. */
3366 pci_write_config(dev, BGE_PCI_MISC_CTL,
3367 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3368 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3369 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3370 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3371 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3372 /*
 3373	 * Disable PCI-X relaxed ordering to ensure the status block update
 3374	 * arrives before the packet buffer DMA. Otherwise the driver may
 3375	 * read a stale status block.
3376 */
3377 if (sc->bge_flags & BGE_FLAG_PCIX) {
3378 devctl = pci_read_config(dev,
3379 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3380 devctl &= ~PCIXM_COMMAND_ERO;
3381 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3382 devctl &= ~PCIXM_COMMAND_MAX_READ;
3383 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3384 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3385 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3386 PCIXM_COMMAND_MAX_READ);
3387 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3388 }
3389 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3390 devctl, 2);
3391 }
 3392	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
3393 if (BGE_IS_5714_FAMILY(sc)) {
3394 /* This chip disables MSI on reset. */
3395 if (sc->bge_flags & BGE_FLAG_MSI) {
3396 val = pci_read_config(dev,
3397 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3398 pci_write_config(dev,
3399 sc->bge_msicap + PCIR_MSI_CTRL,
3400 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3401 val = CSR_READ_4(sc, BGE_MSI_MODE);
3402 CSR_WRITE_4(sc, BGE_MSI_MODE,
3403 val | BGE_MSIMODE_ENABLE);
3404 }
3405 val = CSR_READ_4(sc, BGE_MARB_MODE);
3406 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3407 } else
3408 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3409
3410 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3411 for (i = 0; i < BGE_TIMEOUT; i++) {
3412 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3413 if (val & BGE_VCPU_STATUS_INIT_DONE)
3414 break;
3415 DELAY(100);
3416 }
3417 if (i == BGE_TIMEOUT) {
3418 device_printf(dev, "reset timed out\n");
3419 return (1);
3420 }
3421 } else {
3422 /*
3423 * Poll until we see the 1's complement of the magic number.
3424 * This indicates that the firmware initialization is complete.
 3425		 * We expect this to fail if no chip containing the Ethernet
 3426		 * address is fitted, though.
3427 */
3428 for (i = 0; i < BGE_TIMEOUT; i++) {
3429 DELAY(10);
3430 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3431 if (val == ~BGE_MAGIC_NUMBER)
3432 break;
3433 }
3434
3435 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3436 device_printf(dev,
3437 "firmware handshake timed out, found 0x%08x\n",
3438 val);
3439 }
3440
3441 /*
3442 * XXX Wait for the value of the PCISTATE register to
3443 * return to its original pre-reset state. This is a
3444 * fairly good indicator of reset completion. If we don't
3445 * wait for the reset to fully complete, trying to read
3446 * from the device's non-PCI registers may yield garbage
3447 * results.
3448 */
3449 for (i = 0; i < BGE_TIMEOUT; i++) {
3450 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3451 break;
3452 DELAY(10);
3453 }
3454
3455 /* Fix up byte swapping. */
3456 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3457 BGE_MODECTL_BYTESWAP_DATA);
3458
3459 /* Tell the ASF firmware we are up */
3460 if (sc->bge_asf_mode & ASF_STACKUP)
3461 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3462
3463 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3464
3465 /*
3466 * The 5704 in TBI mode apparently needs some special
 3467	 * adjustment to ensure the SERDES drive level is set
3468 * to 1.2V.
3469 */
3470 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3471 sc->bge_flags & BGE_FLAG_TBI) {
3472 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3473 val = (val & ~0xFFF) | 0x880;
3474 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3475 }
3476
3477 /* XXX: Broadcom Linux driver. */
3478 if (sc->bge_flags & BGE_FLAG_PCIE &&
3479 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
3480 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3481 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3482 /* Enable Data FIFO protection. */
3483 val = CSR_READ_4(sc, 0x7C00);
3484 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3485 }
3486 DELAY(10000);
3487
3488 return (0);
3489}
3490
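/*
 * Recycle the current RX buffer by rewriting its descriptor in place.
 * bge_rxeof() uses these helpers when a frame had errors or a replacement
 * mbuf could not be allocated, so the old buffer stays on the ring.
 */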
3491static __inline void
3492bge_rxreuse_std(struct bge_softc *sc, int i)
3493{
3494 struct bge_rx_bd *r;
3495
3496 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3497 r->bge_flags = BGE_RXBDFLAG_END;
3498 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3499 r->bge_idx = i;
3500 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3501}
3502
3503static __inline void
3504bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3505{
3506 struct bge_extrx_bd *r;
3507
3508 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3509 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3510 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3511 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3512 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3513 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3514 r->bge_idx = i;
3515 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3516}
3517
3518/*
3519 * Frame reception handling. This is called if there's a frame
3520 * on the receive return list.
3521 *
3522 * Note: we have to be able to handle two possibilities here:
3523 * 1) the frame is from the jumbo receive ring
3524 * 2) the frame is from the standard receive ring
3525 */
3526
3527static int
3528bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3529{
3530 struct ifnet *ifp;
3531 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3532 uint16_t rx_cons;
3533
3534 rx_cons = sc->bge_rx_saved_considx;
3535
3536 /* Nothing to do. */
3537 if (rx_cons == rx_prod)
3538 return (rx_npkts);
3539
3540 ifp = sc->bge_ifp;
3541
3542 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3543 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3544 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3545 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
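	/*
	 * The jumbo ring is only in use when the configured MTU does not
	 * fit in a standard mbuf cluster, so only sync it in that case.
	 */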
3546 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3547 (MCLBYTES - ETHER_ALIGN))
3548 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3549 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3550
3551 while (rx_cons != rx_prod) {
3552 struct bge_rx_bd *cur_rx;
3553 uint32_t rxidx;
3554 struct mbuf *m = NULL;
3555 uint16_t vlan_tag = 0;
3556 int have_tag = 0;
3557
3558#ifdef DEVICE_POLLING
3559 if (ifp->if_capenable & IFCAP_POLLING) {
3560 if (sc->rxcycles <= 0)
3561 break;
3562 sc->rxcycles--;
3563 }
3564#endif
3565
3566 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3567
3568 rxidx = cur_rx->bge_idx;
3569 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3570
3571 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3572 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3573 have_tag = 1;
3574 vlan_tag = cur_rx->bge_vlan_tag;
3575 }
3576
3577 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3578 jumbocnt++;
3579 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3580 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3581 bge_rxreuse_jumbo(sc, rxidx);
3582 continue;
3583 }
3584 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3585 bge_rxreuse_jumbo(sc, rxidx);
3586 ifp->if_iqdrops++;
3587 continue;
3588 }
3589 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3590 } else {
3591 stdcnt++;
3592 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3593 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3594 bge_rxreuse_std(sc, rxidx);
3595 continue;
3596 }
3597 if (bge_newbuf_std(sc, rxidx) != 0) {
3598 bge_rxreuse_std(sc, rxidx);
3599 ifp->if_iqdrops++;
3600 continue;
3601 }
3602 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3603 }
3604
3605 ifp->if_ipackets++;
3606#ifndef __NO_STRICT_ALIGNMENT
3607 /*
3608 * For architectures with strict alignment we must make sure
3609 * the payload is aligned.
3610 */
3611 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3612 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3613 cur_rx->bge_len);
3614 m->m_data += ETHER_ALIGN;
3615 }
3616#endif
3617 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3618 m->m_pkthdr.rcvif = ifp;
3619
3620 if (ifp->if_capenable & IFCAP_RXCSUM)
3621 bge_rxcsum(sc, cur_rx, m);
3622
3623 /*
3624 * If we received a packet with a vlan tag,
3625 * attach that information to the packet.
3626 */
3627 if (have_tag) {
3628#if __FreeBSD_version > 700022
3629 m->m_pkthdr.ether_vtag = vlan_tag;
3630 m->m_flags |= M_VLANTAG;
3631#else
3632 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3633 if (m == NULL)
3634 continue;
3635#endif
3636 }
3637
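		/*
		 * If the caller holds the driver lock (holdlck != 0), drop
		 * it around if_input() so the stack is not entered with the
		 * lock held, then reacquire it before touching the ring.
		 */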
3638 if (holdlck != 0) {
3639 BGE_UNLOCK(sc);
3640 (*ifp->if_input)(ifp, m);
3641 BGE_LOCK(sc);
3642 } else
3643 (*ifp->if_input)(ifp, m);
3644 rx_npkts++;
3645
3646 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3647 return (rx_npkts);
3648 }
3649
3650 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3651 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3652 if (stdcnt > 0)
3653 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3654 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3655
3656 if (jumbocnt > 0)
3657 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3658 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3659
3660 sc->bge_rx_saved_considx = rx_cons;
3661 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3662 if (stdcnt)
3663 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3664 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3665 if (jumbocnt)
3666 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3667 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3668#ifdef notyet
3669 /*
3670 * This register wraps very quickly under heavy packet drops.
3671 * If you need correct statistics, you can enable this check.
3672 */
3673 if (BGE_IS_5705_PLUS(sc))
3674 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3675#endif
3676 return (rx_npkts);
3677}
3678
3679static void
3680bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3681{
3682
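	/*
	 * 5717 and newer controllers report checksum status via error
	 * flags in the RX BD and flag IPv6 frames separately; older
	 * controllers are judged by the returned checksum values.
	 */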
3683 if (BGE_IS_5717_PLUS(sc)) {
3684 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3685 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3686 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3687 if ((cur_rx->bge_error_flag &
3688 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3689 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3690 }
3691 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3692 m->m_pkthdr.csum_data =
3693 cur_rx->bge_tcp_udp_csum;
3694 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3695 CSUM_PSEUDO_HDR;
3696 }
3697 }
3698 } else {
3699 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3700 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3701 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3702 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3703 }
3704 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3705 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3706 m->m_pkthdr.csum_data =
3707 cur_rx->bge_tcp_udp_csum;
3708 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3709 CSUM_PSEUDO_HDR;
3710 }
3711 }
3712}
3713
3714static void
3715bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3716{
3717 struct bge_tx_bd *cur_tx;
3718 struct ifnet *ifp;
3719
3720 BGE_LOCK_ASSERT(sc);
3721
3722 /* Nothing to do. */
3723 if (sc->bge_tx_saved_considx == tx_cons)
3724 return;
3725
3726 ifp = sc->bge_ifp;
3727
3728 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3729 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3730 /*
3731 * Go through our tx ring and free mbufs for those
3732 * frames that have been sent.
3733 */
3734 while (sc->bge_tx_saved_considx != tx_cons) {
3735 uint32_t idx;
3736
3737 idx = sc->bge_tx_saved_considx;
3738 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3739 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3740 ifp->if_opackets++;
3741 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3742 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3743 sc->bge_cdata.bge_tx_dmamap[idx],
3744 BUS_DMASYNC_POSTWRITE);
3745 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3746 sc->bge_cdata.bge_tx_dmamap[idx]);
3747 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3748 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3749 }
3750 sc->bge_txcnt--;
3751 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3752 }
3753
3754 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3755 if (sc->bge_txcnt == 0)
3756 sc->bge_timer = 0;
3757}
3758
3759#ifdef DEVICE_POLLING
3760static int
3761bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3762{
3763 struct bge_softc *sc = ifp->if_softc;
3764 uint16_t rx_prod, tx_cons;
3765 uint32_t statusword;
3766 int rx_npkts = 0;
3767
3768 BGE_LOCK(sc);
3769 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3770 BGE_UNLOCK(sc);
3771 return (rx_npkts);
3772 }
3773
3774 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3775 sc->bge_cdata.bge_status_map,
3776 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3777 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3778 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3779
3780 statusword = sc->bge_ldata.bge_status_block->bge_status;
3781 sc->bge_ldata.bge_status_block->bge_status = 0;
3782
3783 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3784 sc->bge_cdata.bge_status_map,
3785 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3786
3787 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3788 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3789 sc->bge_link_evt++;
3790
3791 if (cmd == POLL_AND_CHECK_STATUS)
3792 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3793 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3794 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3795 bge_link_upd(sc);
3796
3797 sc->rxcycles = count;
3798 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3799 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3800 BGE_UNLOCK(sc);
3801 return (rx_npkts);
3802 }
3803 bge_txeof(sc, tx_cons);
3804 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3805 bge_start_locked(ifp);
3806
3807 BGE_UNLOCK(sc);
3808 return (rx_npkts);
3809}
3810#endif /* DEVICE_POLLING */
3811
3812static int
3813bge_msi_intr(void *arg)
3814{
3815 struct bge_softc *sc;
3816
3817 sc = (struct bge_softc *)arg;
3818 /*
 3819	 * This interrupt is not shared, and the controller has already
 3820	 * disabled further interrupts.
3821 */
3822 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3823 return (FILTER_HANDLED);
3824}
3825
3826static void
3827bge_intr_task(void *arg, int pending)
3828{
3829 struct bge_softc *sc;
3830 struct ifnet *ifp;
3831 uint32_t status, status_tag;
3832 uint16_t rx_prod, tx_cons;
3833
3834 sc = (struct bge_softc *)arg;
3835 ifp = sc->bge_ifp;
3836
3837 BGE_LOCK(sc);
3838 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3839 BGE_UNLOCK(sc);
3840 return;
3841 }
3842
3843 /* Get updated status block. */
3844 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3845 sc->bge_cdata.bge_status_map,
3846 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3847
 3848	/* Save producer/consumer indexes. */
3849 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3850 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3851 status = sc->bge_ldata.bge_status_block->bge_status;
3852 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
3853 sc->bge_ldata.bge_status_block->bge_status = 0;
3854 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3855 sc->bge_cdata.bge_status_map,
3856 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3857 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
3858 status_tag = 0;
3859
3860 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
3861 bge_link_upd(sc);
3862
3863 /* Let controller work. */
3864 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
3865
3866 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3867 sc->bge_rx_saved_considx != rx_prod) {
3868 /* Check RX return ring producer/consumer. */
3869 BGE_UNLOCK(sc);
3870 bge_rxeof(sc, rx_prod, 0);
3871 BGE_LOCK(sc);
3872 }
3873 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3874 /* Check TX ring producer/consumer. */
3875 bge_txeof(sc, tx_cons);
3876 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3877 bge_start_locked(ifp);
3878 }
3879 BGE_UNLOCK(sc);
3880}
3881
3882static void
3883bge_intr(void *xsc)
3884{
3885 struct bge_softc *sc;
3886 struct ifnet *ifp;
3887 uint32_t statusword;
3888 uint16_t rx_prod, tx_cons;
3889
3890 sc = xsc;
3891
3892 BGE_LOCK(sc);
3893
3894 ifp = sc->bge_ifp;
3895
3896#ifdef DEVICE_POLLING
3897 if (ifp->if_capenable & IFCAP_POLLING) {
3898 BGE_UNLOCK(sc);
3899 return;
3900 }
3901#endif
3902
3903 /*
3904 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3905 * disable interrupts by writing nonzero like we used to, since with
3906 * our current organization this just gives complications and
3907 * pessimizations for re-enabling interrupts. We used to have races
3908 * instead of the necessary complications. Disabling interrupts
3909 * would just reduce the chance of a status update while we are
3910 * running (by switching to the interrupt-mode coalescence
3911 * parameters), but this chance is already very low so it is more
3912 * efficient to get another interrupt than prevent it.
3913 *
3914 * We do the ack first to ensure another interrupt if there is a
3915 * status update after the ack. We don't check for the status
3916 * changing later because it is more efficient to get another
3917 * interrupt than prevent it, not quite as above (not checking is
3918 * a smaller optimization than not toggling the interrupt enable,
 3919	 * since checking doesn't involve PCI accesses and toggling requires
3920 * the status check). So toggling would probably be a pessimization
3921 * even with MSI. It would only be needed for using a task queue.
3922 */
3923 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3924
3925 /*
3926 * Do the mandatory PCI flush as well as get the link status.
3927 */
3928 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3929
3930 /* Make sure the descriptor ring indexes are coherent. */
3931 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3932 sc->bge_cdata.bge_status_map,
3933 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3934 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3935 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3936 sc->bge_ldata.bge_status_block->bge_status = 0;
3937 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3938 sc->bge_cdata.bge_status_map,
3939 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3940
3941 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3942 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3943 statusword || sc->bge_link_evt)
3944 bge_link_upd(sc);
3945
3946 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3947 /* Check RX return ring producer/consumer. */
3948 bge_rxeof(sc, rx_prod, 1);
3949 }
3950
3951 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3952 /* Check TX ring producer/consumer. */
3953 bge_txeof(sc, tx_cons);
3954 }
3955
3956 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3957 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3958 bge_start_locked(ifp);
3959
3960 BGE_UNLOCK(sc);
3961}
3962
3963static void
3964bge_asf_driver_up(struct bge_softc *sc)
3965{
3966 if (sc->bge_asf_mode & ASF_STACKUP) {
 3967		/* Send ASF heartbeat approx. every 2s */
3968 if (sc->bge_asf_count)
3969 sc->bge_asf_count --;
3970 else {
3971 sc->bge_asf_count = 2;
3972 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3973 BGE_FW_DRV_ALIVE);
3974 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3975 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3976 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3977 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3978 }
3979 }
3980}
3981
3982static void
3983bge_tick(void *xsc)
3984{
3985 struct bge_softc *sc = xsc;
3986 struct mii_data *mii = NULL;
3987
3988 BGE_LOCK_ASSERT(sc);
3989
3990 /* Synchronize with possible callout reset/stop. */
3991 if (callout_pending(&sc->bge_stat_ch) ||
3992 !callout_active(&sc->bge_stat_ch))
3993 return;
3994
3995 if (BGE_IS_5705_PLUS(sc))
3996 bge_stats_update_regs(sc);
3997 else
3998 bge_stats_update(sc);
3999
4000 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4001 mii = device_get_softc(sc->bge_miibus);
4002 /*
 4003		 * Do not touch the PHY if we have link up. This could break
 4004		 * IPMI/ASF mode or produce extra input errors
 4005		 * (extra errors were reported for bcm5701 & bcm5704).
4006 */
4007 if (!sc->bge_link)
4008 mii_tick(mii);
4009 } else {
4010 /*
 4011		 * Since auto-polling can't be used in TBI mode, we have to poll
 4012		 * the link status manually. Here we register a pending link
 4013		 * event and trigger an interrupt.
4014 */
4015#ifdef DEVICE_POLLING
4016 /* In polling mode we poll link state in bge_poll(). */
4017 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4018#endif
4019 {
4020 sc->bge_link_evt++;
4021 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4022 sc->bge_flags & BGE_FLAG_5788)
4023 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4024 else
4025 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4026 }
4027 }
4028
4029 bge_asf_driver_up(sc);
4030 bge_watchdog(sc);
4031
4032 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4033}
4034
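/*
 * Update interface counters from the MAC statistics registers (used on
 * 5705 and newer controllers, see bge_tick()). The registers appear to be
 * cleared by reading, so totals are accumulated in bge_mac_stats.
 */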
4035static void
4036bge_stats_update_regs(struct bge_softc *sc)
4037{
4038 struct ifnet *ifp;
4039 struct bge_mac_stats *stats;
4040
4041 ifp = sc->bge_ifp;
4042 stats = &sc->bge_mac_stats;
4043
4044 stats->ifHCOutOctets +=
4045 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4046 stats->etherStatsCollisions +=
4047 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4048 stats->outXonSent +=
4049 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4050 stats->outXoffSent +=
4051 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4052 stats->dot3StatsInternalMacTransmitErrors +=
4053 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4054 stats->dot3StatsSingleCollisionFrames +=
4055 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4056 stats->dot3StatsMultipleCollisionFrames +=
4057 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4058 stats->dot3StatsDeferredTransmissions +=
4059 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4060 stats->dot3StatsExcessiveCollisions +=
4061 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4062 stats->dot3StatsLateCollisions +=
4063 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4064 stats->ifHCOutUcastPkts +=
4065 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4066 stats->ifHCOutMulticastPkts +=
4067 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4068 stats->ifHCOutBroadcastPkts +=
4069 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4070
4071 stats->ifHCInOctets +=
4072 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4073 stats->etherStatsFragments +=
4074 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4075 stats->ifHCInUcastPkts +=
4076 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4077 stats->ifHCInMulticastPkts +=
4078 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4079 stats->ifHCInBroadcastPkts +=
4080 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4081 stats->dot3StatsFCSErrors +=
4082 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4083 stats->dot3StatsAlignmentErrors +=
4084 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4085 stats->xonPauseFramesReceived +=
4086 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4087 stats->xoffPauseFramesReceived +=
4088 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4089 stats->macControlFramesReceived +=
4090 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4091 stats->xoffStateEntered +=
4092 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4093 stats->dot3StatsFramesTooLong +=
4094 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4095 stats->etherStatsJabbers +=
4096 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4097 stats->etherStatsUndersizePkts +=
4098 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4099
4100 stats->FramesDroppedDueToFilters +=
4101 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4102 stats->DmaWriteQueueFull +=
4103 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4104 stats->DmaWriteHighPriQueueFull +=
4105 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4106 stats->NoMoreRxBDs +=
4107 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4108 stats->InputDiscards +=
4109 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4110 stats->InputErrors +=
4111 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4112 stats->RecvThresholdHit +=
4113 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4114
4115 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4116 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4117 stats->InputErrors);
4118}
4119
4120static void
4121bge_stats_clear_regs(struct bge_softc *sc)
4122{
4123
4124 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4125 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4126 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4127 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4128 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4129 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4130 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4131 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4132 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4133 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4134 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4135 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4136 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4137
4138 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4139 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4140 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4141 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4142 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4143 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4144 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4145 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4146 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4147 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4148 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4149 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4150 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4151 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4152
4153 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4154 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4155 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4156 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4157 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4158 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4159 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4160}
4161
4162static void
4163bge_stats_update(struct bge_softc *sc)
4164{
4165 struct ifnet *ifp;
4166 bus_size_t stats;
4167 uint32_t cnt; /* current register value */
4168
4169 ifp = sc->bge_ifp;
4170
4171 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4172
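/*
 * READ_STAT() reads one 32-bit word of the legacy statistics block, which
 * is visible at BGE_MEMWIN_START + BGE_STATS_BLOCK, given the field name
 * of the counter within struct bge_stats.
 */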
4173#define READ_STAT(sc, stats, stat) \
4174 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4175
4176 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4177 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4178 sc->bge_tx_collisions = cnt;
4179
4180 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4181 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4182 sc->bge_rx_discards = cnt;
4183
4184 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4185 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4186 sc->bge_tx_discards = cnt;
4187
4188#undef READ_STAT
4189}
4190
4191/*
4192 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4193 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4194 * but when such padded frames employ the bge IP/TCP checksum offload,
4195 * the hardware checksum assist gives incorrect results (possibly
4196 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4197 * If we pad such runts with zeros, the onboard checksum comes out correct.
4198 */
4199static __inline int
4200bge_cksum_pad(struct mbuf *m)
4201{
4202 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4203 struct mbuf *last;
4204
4205 /* If there's only the packet-header and we can pad there, use it. */
4206 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4207 M_TRAILINGSPACE(m) >= padlen) {
4208 last = m;
4209 } else {
4210 /*
4211 * Walk packet chain to find last mbuf. We will either
4212 * pad there, or append a new mbuf and pad it.
4213 */
4214 for (last = m; last->m_next != NULL; last = last->m_next);
4215 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4216 /* Allocate new empty mbuf, pad it. Compact later. */
4217 struct mbuf *n;
4218
4219 MGET(n, M_DONTWAIT, MT_DATA);
4220 if (n == NULL)
4221 return (ENOBUFS);
4222 n->m_len = 0;
4223 last->m_next = n;
4224 last = n;
4225 }
4226 }
4227
4228 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4229 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4230 last->m_len += padlen;
4231 m->m_pkthdr.len += padlen;
4232
4233 return (0);
4234}
4235
4236static struct mbuf *
4237bge_check_short_dma(struct mbuf *m)
4238{
4239 struct mbuf *n;
4240 int found;
4241
4242 /*
 4243	 * If the device receives two back-to-back send BDs with less than
 4244	 * or equal to 8 total bytes, it may hang. The two back-to-back send
 4245	 * BDs must be in the same frame for this failure to occur. Scan the
 4246	 * mbuf chain and see whether two such back-to-back send BDs are
 4247	 * there. If this is the case, allocate a new mbuf and copy the
 4248	 * frame to work around the silicon bug.
4249 */
4250 for (n = m, found = 0; n != NULL; n = n->m_next) {
4251 if (n->m_len < 8) {
4252 found++;
4253 if (found > 1)
4254 break;
4255 continue;
4256 }
4257 found = 0;
4258 }
4259
4260 if (found > 1) {
4261 n = m_defrag(m, M_DONTWAIT);
4262 if (n == NULL)
4263 m_freem(m);
4264 } else
4265 n = m;
4266 return (n);
4267}
4268
4269static struct mbuf *
4270bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4271 uint16_t *flags)
4272{
4273 struct ip *ip;
4274 struct tcphdr *tcp;
4275 struct mbuf *n;
4276 uint16_t hlen;
4277 uint32_t poff;
4278
4279 if (M_WRITABLE(m) == 0) {
4280 /* Get a writable copy. */
4281 n = m_dup(m, M_DONTWAIT);
4282 m_freem(m);
4283 if (n == NULL)
4284 return (NULL);
4285 m = n;
4286 }
4287 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4288 if (m == NULL)
4289 return (NULL);
4290 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4291 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4292 m = m_pullup(m, poff + sizeof(struct tcphdr));
4293 if (m == NULL)
4294 return (NULL);
4295 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4296 m = m_pullup(m, poff + (tcp->th_off << 2));
4297 if (m == NULL)
4298 return (NULL);
4299 /*
 4300	 * It seems the controller doesn't modify the IP length and TCP pseudo
 4301	 * checksum. These checksums, computed by the upper stack, should be 0.
4302 */
4303 *mss = m->m_pkthdr.tso_segsz;
4304 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4305 ip->ip_sum = 0;
4306 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4307 /* Clear pseudo checksum computed by TCP stack. */
4308 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4309 tcp->th_sum = 0;
4310 /*
 4311	 * Broadcom controllers use a different descriptor format for TSO
 4312	 * depending on ASIC revision. Due to the licensing issue around
 4313	 * TSO-capable firmware and the lower performance of firmware-based
 4314	 * TSO, we only support hardware-based TSO.
4315 */
4316 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4317 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
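	/* E.g. with no IP or TCP options (20 bytes each), hlen is 10 words. */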
4318 if (sc->bge_flags & BGE_FLAG_TSO3) {
4319 /*
4320 * For BCM5717 and newer controllers, hardware based TSO
4321 * uses the 14 lower bits of the bge_mss field to store the
4322 * MSS and the upper 2 bits to store the lowest 2 bits of
4323 * the IP/TCP header length. The upper 6 bits of the header
4324 * length are stored in the bge_flags[14:10,4] field. Jumbo
4325 * frames are supported.
4326 */
4327 *mss |= ((hlen & 0x3) << 14);
4328 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
4329 } else {
4330 /*
4331 * For BCM5755 and newer controllers, hardware based TSO uses
4332 * the lower 11 bits to store the MSS and the upper 5 bits to
4333 * store the IP/TCP header length. Jumbo frames are not
4334 * supported.
4335 */
4336 *mss |= (hlen << 11);
4337 }
4338 return (m);
4339}
4340
4341/*
4342 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4343 * pointers to descriptors.
4344 */
4345static int
4346bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4347{
4348 bus_dma_segment_t segs[BGE_NSEG_NEW];
4349 bus_dmamap_t map;
4350 struct bge_tx_bd *d;
4351 struct mbuf *m = *m_head;
4352 uint32_t idx = *txidx;
4353 uint16_t csum_flags, mss, vlan_tag;
4354 int nsegs, i, error;
4355
4356 csum_flags = 0;
4357 mss = 0;
4358 vlan_tag = 0;
4359 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4360 m->m_next != NULL) {
4361 *m_head = bge_check_short_dma(m);
4362 if (*m_head == NULL)
4363 return (ENOBUFS);
4364 m = *m_head;
4365 }
4366 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4367 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4368 if (*m_head == NULL)
4369 return (ENOBUFS);
4370 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4371 BGE_TXBDFLAG_CPU_POST_DMA;
4372 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4373 if (m->m_pkthdr.csum_flags & CSUM_IP)
4374 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4375 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4376 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4377 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4378 (error = bge_cksum_pad(m)) != 0) {
4379 m_freem(m);
4380 *m_head = NULL;
4381 return (error);
4382 }
4383 }
4384 if (m->m_flags & M_LASTFRAG)
4385 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4386 else if (m->m_flags & M_FRAG)
4387 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4388 }
4389
4390 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4391 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4392 m->m_pkthdr.len > ETHER_MAX_LEN)
4393 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4394 if (sc->bge_forced_collapse > 0 &&
4395 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4396 /*
 4397			 * Forcibly collapse mbuf chains to overcome a hardware
 4398			 * limitation which only supports a single outstanding
 4399			 * DMA read operation.
4400 */
4401 if (sc->bge_forced_collapse == 1)
4402 m = m_defrag(m, M_DONTWAIT);
4403 else
4404 m = m_collapse(m, M_DONTWAIT,
4405 sc->bge_forced_collapse);
4406 if (m == NULL)
4407 m = *m_head;
4408 *m_head = m;
4409 }
4410 }
4411
4412 map = sc->bge_cdata.bge_tx_dmamap[idx];
4413 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4414 &nsegs, BUS_DMA_NOWAIT);
4415 if (error == EFBIG) {
4416 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4417 if (m == NULL) {
4418 m_freem(*m_head);
4419 *m_head = NULL;
4420 return (ENOBUFS);
4421 }
4422 *m_head = m;
4423 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4424 m, segs, &nsegs, BUS_DMA_NOWAIT);
4425 if (error) {
4426 m_freem(m);
4427 *m_head = NULL;
4428 return (error);
4429 }
4430 } else if (error != 0)
4431 return (error);
4432
4433 /* Check if we have enough free send BDs. */
4434 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4435 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4436 return (ENOBUFS);
4437 }
4438
4439 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4440
4441#if __FreeBSD_version > 700022
4442 if (m->m_flags & M_VLANTAG) {
4443 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4444 vlan_tag = m->m_pkthdr.ether_vtag;
4445 }
4446#else
4447 {
4448 struct m_tag *mtag;
4449
4450 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
4451 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4452 vlan_tag = VLAN_TAG_VALUE(mtag);
4453 }
4454 }
4455#endif
4456 for (i = 0; ; i++) {
4457 d = &sc->bge_ldata.bge_tx_ring[idx];
4458 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4459 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4460 d->bge_len = segs[i].ds_len;
4461 d->bge_flags = csum_flags;
4462 d->bge_vlan_tag = vlan_tag;
4463 d->bge_mss = mss;
4464 if (i == nsegs - 1)
4465 break;
4466 BGE_INC(idx, BGE_TX_RING_CNT);
4467 }
4468
4469 /* Mark the last segment as end of packet... */
4470 d->bge_flags |= BGE_TXBDFLAG_END;
4471
4472 /*
 4473	 * Ensure that the map for this transmission
4474 * is placed at the array index of the last descriptor
4475 * in this chain.
4476 */
4477 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4478 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4479 sc->bge_cdata.bge_tx_chain[idx] = m;
4480 sc->bge_txcnt += nsegs;
4481
4482 BGE_INC(idx, BGE_TX_RING_CNT);
4483 *txidx = idx;
4484
4485 return (0);
4486}
4487
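/*
 * Editor's note -- illustrative, standalone sketch, not part of if_bge.c:
 * bge_encap() above advances the producer index with BGE_INC(), which
 * wraps the index around the fixed-size send ring.  The small program
 * below demonstrates the same wraparound; EX_TX_RING_CNT and EX_INC()
 * are local stand-ins for the driver's BGE_TX_RING_CNT and BGE_INC().
 */
#include <stdio.h>
#include <stdint.h>

#define EX_TX_RING_CNT	512			/* assumed ring size for the example */
#define EX_INC(x, cnt)	((x) = ((x) + 1) % (cnt))

int
main(void)
{
	uint32_t idx = EX_TX_RING_CNT - 2;	/* start near the end of the ring */
	int i;

	for (i = 0; i < 4; i++) {
		printf("descriptor slot %u\n", idx);
		EX_INC(idx, EX_TX_RING_CNT);	/* 510, 511, then wraps to 0, 1 */
	}
	return (0);
}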
4488/*
4489 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4490 * to the mbuf data regions directly in the transmit descriptors.
4491 */
4492static void
4493bge_start_locked(struct ifnet *ifp)
4494{
4495 struct bge_softc *sc;
4496 struct mbuf *m_head;
4497 uint32_t prodidx;
4498 int count;
4499
4500 sc = ifp->if_softc;
4501 BGE_LOCK_ASSERT(sc);
4502
4503 if (!sc->bge_link ||
4504 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4505 IFF_DRV_RUNNING)
4506 return;
4507
4508 prodidx = sc->bge_tx_prodidx;
4509
4510 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4511 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4512 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4513 break;
4514 }
4515 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4516 if (m_head == NULL)
4517 break;
4518
4519 /*
4520 * XXX
4521 * The code inside the if() block is never reached since we
4522 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4523 * requests to checksum TCP/UDP in a fragmented packet.
4524 *
4525 * XXX
4526 * safety overkill. If this is a fragmented packet chain
4527 * with delayed TCP/UDP checksums, then only encapsulate
4528 * it if we have enough descriptors to handle the entire
4529 * chain at once.
4530 * (paranoia -- may not actually be needed)
4531 */
4532 if (m_head->m_flags & M_FIRSTFRAG &&
4533 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4534 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4535 m_head->m_pkthdr.csum_data + 16) {
4536 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4537 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4538 break;
4539 }
4540 }
4541
4542 /*
4543 * Pack the data into the transmit ring. If we
4544 * don't have room, set the OACTIVE flag and wait
4545 * for the NIC to drain the ring.
4546 */
4547 if (bge_encap(sc, &m_head, &prodidx)) {
4548 if (m_head == NULL)
4549 break;
4550 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4551 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4552 break;
4553 }
4554 ++count;
4555
4556 /*
4557 * If there's a BPF listener, bounce a copy of this frame
4558 * to him.
4559 */
4560#ifdef ETHER_BPF_MTAP
4561 ETHER_BPF_MTAP(ifp, m_head);
4562#else
4563 BPF_MTAP(ifp, m_head);
4564#endif
4565 }
4566
4567 if (count > 0) {
4568 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4569 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4570 /* Transmit. */
4571 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4572 /* 5700 b2 errata */
4573 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4574 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4575
4576 sc->bge_tx_prodidx = prodidx;
4577
4578 /*
4579 * Set a timeout in case the chip goes out to lunch.
4580 */
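		/*
		 * Editor's note (added): bge_timer is decremented once per
		 * second by bge_watchdog(), driven by the bge_tick() callout
		 * armed in bge_init_locked(); when it reaches zero the
		 * interface is reported hung and reinitialized.
		 */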
4581 sc->bge_timer = 5;
4582 }
4583}
4584
4585/*
4586 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4587 * to the mbuf data regions directly in the transmit descriptors.
4588 */
4589static void
4590bge_start(struct ifnet *ifp)
4591{
4592 struct bge_softc *sc;
4593
4594 sc = ifp->if_softc;
4595 BGE_LOCK(sc);
4596 bge_start_locked(ifp);
4597 BGE_UNLOCK(sc);
4598}
4599
4600static void
4601bge_init_locked(struct bge_softc *sc)
4602{
4603 struct ifnet *ifp;
4604 uint16_t *m;
4605 uint32_t mode;
4606
4607 BGE_LOCK_ASSERT(sc);
4608
4609 ifp = sc->bge_ifp;
4610
4611 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4612 return;
4613
4614 /* Cancel pending I/O and flush buffers. */
4615 bge_stop(sc);
4616
4617 bge_stop_fw(sc);
4618 bge_sig_pre_reset(sc, BGE_RESET_START);
4619 bge_reset(sc);
4620 bge_sig_legacy(sc, BGE_RESET_START);
4621 bge_sig_post_reset(sc, BGE_RESET_START);
4622
4623 bge_chipinit(sc);
4624
4625 /*
4626 * Init the various state machines, ring
4627 * control blocks and firmware.
4628 */
4629 if (bge_blockinit(sc)) {
4630 device_printf(sc->bge_dev, "initialization failure\n");
4631 return;
4632 }
4633
4634 ifp = sc->bge_ifp;
4635
4636 /* Specify MTU. */
4637 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4638 ETHER_HDR_LEN + ETHER_CRC_LEN +
4639 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
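	/*
	 * Editor's note (illustrative): with the default MTU of 1500 and
	 * IFCAP_VLAN_MTU enabled, this programs 1500 + 14 (header) +
	 * 4 (CRC) + 4 (VLAN tag) = 1522 bytes into BGE_RX_MTU.
	 */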
4640
4641 /* Load our MAC address. */
4642 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4643 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4644 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4645
4646 /* Program promiscuous mode. */
4647 bge_setpromisc(sc);
4648
4649 /* Program multicast filter. */
4650 bge_setmulti(sc);
4651
4652 /* Program VLAN tag stripping. */
4653 bge_setvlan(sc);
4654
4655 /* Override UDP checksum offloading. */
4656 if (sc->bge_forced_udpcsum == 0)
4657 sc->bge_csum_features &= ~CSUM_UDP;
4658 else
4659 sc->bge_csum_features |= CSUM_UDP;
4660 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4661 ifp->if_capenable & IFCAP_TXCSUM) {
4662 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4663 ifp->if_hwassist |= sc->bge_csum_features;
4664 }
4665
4666 /* Init RX ring. */
4667 if (bge_init_rx_ring_std(sc) != 0) {
4668 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4669 bge_stop(sc);
4670 return;
4671 }
4672
4673 /*
4674 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
 4675	 * memory to ensure that the chip has in fact read the first
4676 * entry of the ring.
4677 */
4678 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4679 uint32_t v, i;
4680 for (i = 0; i < 10; i++) {
4681 DELAY(20);
4682 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4683 if (v == (MCLBYTES - ETHER_ALIGN))
4684 break;
4685 }
4686 if (i == 10)
4687 device_printf (sc->bge_dev,
4688 "5705 A0 chip failed to load RX ring\n");
4689 }
4690
4691 /* Init jumbo RX ring. */
4692 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4693 (MCLBYTES - ETHER_ALIGN)) {
4694 if (bge_init_rx_ring_jumbo(sc) != 0) {
4695 device_printf(sc->bge_dev,
4696 "no memory for jumbo Rx buffers.\n");
4697 bge_stop(sc);
4698 return;
4699 }
4700 }
4701
4702 /* Init our RX return ring index. */
4703 sc->bge_rx_saved_considx = 0;
4704
4705 /* Init our RX/TX stat counters. */
4706 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4707
4708 /* Init TX ring. */
4709 bge_init_tx_ring(sc);
4710
4711 /* Enable TX MAC state machine lockup fix. */
4712 mode = CSR_READ_4(sc, BGE_TX_MODE);
4713 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4714 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4715 /* Turn on transmitter. */
4716 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4717
4718 /* Turn on receiver. */
4719 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4720
4721 /*
 4722	 * Set the number of good frames to receive after the RX MBUF
4723 * Low Watermark has been reached. After the RX MAC receives
4724 * this number of frames, it will drop subsequent incoming
4725 * frames until the MBUF High Watermark is reached.
4726 */
4727 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4728
4729 /* Clear MAC statistics. */
4730 if (BGE_IS_5705_PLUS(sc))
4731 bge_stats_clear_regs(sc);
4732
4733 /* Tell firmware we're alive. */
4734 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4735
4736#ifdef DEVICE_POLLING
4737 /* Disable interrupts if we are polling. */
4738 if (ifp->if_capenable & IFCAP_POLLING) {
4739 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4740 BGE_PCIMISCCTL_MASK_PCI_INTR);
4741 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4742 } else
4743#endif
4744
4745 /* Enable host interrupts. */
4746 {
4747 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4748 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4749 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4750 }
4751
4752 bge_ifmedia_upd_locked(ifp);
4753
4754 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4755 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4756
4757 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4758}
4759
4760static void
4761bge_init(void *xsc)
4762{
4763 struct bge_softc *sc = xsc;
4764
4765 BGE_LOCK(sc);
4766 bge_init_locked(sc);
4767 BGE_UNLOCK(sc);
4768}
4769
4770/*
4771 * Set media options.
4772 */
4773static int
4774bge_ifmedia_upd(struct ifnet *ifp)
4775{
4776 struct bge_softc *sc = ifp->if_softc;
4777 int res;
4778
4779 BGE_LOCK(sc);
4780 res = bge_ifmedia_upd_locked(ifp);
4781 BGE_UNLOCK(sc);
4782
4783 return (res);
4784}
4785
4786static int
4787bge_ifmedia_upd_locked(struct ifnet *ifp)
4788{
4789 struct bge_softc *sc = ifp->if_softc;
4790 struct mii_data *mii;
4791 struct mii_softc *miisc;
4792 struct ifmedia *ifm;
4793
4794 BGE_LOCK_ASSERT(sc);
4795
4796 ifm = &sc->bge_ifmedia;
4797
4798 /* If this is a 1000baseX NIC, enable the TBI port. */
4799 if (sc->bge_flags & BGE_FLAG_TBI) {
4800 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4801 return (EINVAL);
4802 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4803 case IFM_AUTO:
4804 /*
4805 * The BCM5704 ASIC appears to have a special
4806 * mechanism for programming the autoneg
4807 * advertisement registers in TBI mode.
4808 */
4809 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4810 uint32_t sgdig;
4811 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4812 if (sgdig & BGE_SGDIGSTS_DONE) {
4813 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4814 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4815 sgdig |= BGE_SGDIGCFG_AUTO |
4816 BGE_SGDIGCFG_PAUSE_CAP |
4817 BGE_SGDIGCFG_ASYM_PAUSE;
4818 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4819 sgdig | BGE_SGDIGCFG_SEND);
4820 DELAY(5);
4821 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4822 }
4823 }
4824 break;
4825 case IFM_1000_SX:
4826 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4827 BGE_CLRBIT(sc, BGE_MAC_MODE,
4828 BGE_MACMODE_HALF_DUPLEX);
4829 } else {
4830 BGE_SETBIT(sc, BGE_MAC_MODE,
4831 BGE_MACMODE_HALF_DUPLEX);
4832 }
4833 break;
4834 default:
4835 return (EINVAL);
4836 }
4837 return (0);
4838 }
4839
4840 sc->bge_link_evt++;
4841 mii = device_get_softc(sc->bge_miibus);
4842 if (mii->mii_instance)
4843 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4844 mii_phy_reset(miisc);
4845 mii_mediachg(mii);
4846
4847 /*
4848 * Force an interrupt so that we will call bge_link_upd
4849 * if needed and clear any pending link state attention.
 4850	 * Without this we would not get any further interrupts for
 4851	 * link state changes, so the link would never come UP and
 4852	 * bge_start_locked() would be unable to transmit; the only
 4853	 * way to recover was to receive a packet and take an RX
 4854	 * interrupt.
 4855	 * bge_tick() should cover fiber cards, so we might not need
 4856	 * this here when BGE_FLAG_TBI is set, but since we poll for
 4857	 * fiber anyway it does no harm.
4858 */
4859 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4860 sc->bge_flags & BGE_FLAG_5788)
4861 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4862 else
4863 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4864
4865 return (0);
4866}
4867
4868/*
4869 * Report current media status.
4870 */
4871static void
4872bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4873{
4874 struct bge_softc *sc = ifp->if_softc;
4875 struct mii_data *mii;
4876
4877 BGE_LOCK(sc);
4878
4879 if (sc->bge_flags & BGE_FLAG_TBI) {
4880 ifmr->ifm_status = IFM_AVALID;
4881 ifmr->ifm_active = IFM_ETHER;
4882 if (CSR_READ_4(sc, BGE_MAC_STS) &
4883 BGE_MACSTAT_TBI_PCS_SYNCHED)
4884 ifmr->ifm_status |= IFM_ACTIVE;
4885 else {
4886 ifmr->ifm_active |= IFM_NONE;
4887 BGE_UNLOCK(sc);
4888 return;
4889 }
4890 ifmr->ifm_active |= IFM_1000_SX;
4891 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4892 ifmr->ifm_active |= IFM_HDX;
4893 else
4894 ifmr->ifm_active |= IFM_FDX;
4895 BGE_UNLOCK(sc);
4896 return;
4897 }
4898
4899 mii = device_get_softc(sc->bge_miibus);
4900 mii_pollstat(mii);
4901 ifmr->ifm_active = mii->mii_media_active;
4902 ifmr->ifm_status = mii->mii_media_status;
4903
4904 BGE_UNLOCK(sc);
4905}
4906
4907static int
4908bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4909{
4910 struct bge_softc *sc = ifp->if_softc;
4911 struct ifreq *ifr = (struct ifreq *) data;
4912 struct mii_data *mii;
4913 int flags, mask, error = 0;
4914
4915 switch (command) {
4916 case SIOCSIFMTU:
4917 BGE_LOCK(sc);
4918 if (ifr->ifr_mtu < ETHERMIN ||
4919 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4920 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4921 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4922 ifr->ifr_mtu > ETHERMTU))
4923 error = EINVAL;
4924 else if (ifp->if_mtu != ifr->ifr_mtu) {
4925 ifp->if_mtu = ifr->ifr_mtu;
4926 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4927 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4928 bge_init_locked(sc);
4929 }
4930 }
4931 BGE_UNLOCK(sc);
4932 break;
4933 case SIOCSIFFLAGS:
4934 BGE_LOCK(sc);
4935 if (ifp->if_flags & IFF_UP) {
4936 /*
4937 * If only the state of the PROMISC flag changed,
4938 * then just use the 'set promisc mode' command
4939 * instead of reinitializing the entire NIC. Doing
4940 * a full re-init means reloading the firmware and
4941 * waiting for it to start up, which may take a
4942 * second or two. Similarly for ALLMULTI.
4943 */
4944 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4945 flags = ifp->if_flags ^ sc->bge_if_flags;
4946 if (flags & IFF_PROMISC)
4947 bge_setpromisc(sc);
4948 if (flags & IFF_ALLMULTI)
4949 bge_setmulti(sc);
4950 } else
4951 bge_init_locked(sc);
4952 } else {
4953 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4954 bge_stop(sc);
4955 }
4956 }
4957 sc->bge_if_flags = ifp->if_flags;
4958 BGE_UNLOCK(sc);
4959 error = 0;
4960 break;
4961 case SIOCADDMULTI:
4962 case SIOCDELMULTI:
4963 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4964 BGE_LOCK(sc);
4965 bge_setmulti(sc);
4966 BGE_UNLOCK(sc);
4967 error = 0;
4968 }
4969 break;
4970 case SIOCSIFMEDIA:
4971 case SIOCGIFMEDIA:
4972 if (sc->bge_flags & BGE_FLAG_TBI) {
4973 error = ifmedia_ioctl(ifp, ifr,
4974 &sc->bge_ifmedia, command);
4975 } else {
4976 mii = device_get_softc(sc->bge_miibus);
4977 error = ifmedia_ioctl(ifp, ifr,
4978 &mii->mii_media, command);
4979 }
4980 break;
4981 case SIOCSIFCAP:
4982 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4983#ifdef DEVICE_POLLING
4984 if (mask & IFCAP_POLLING) {
4985 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4986 error = ether_poll_register(bge_poll, ifp);
4987 if (error)
4988 return (error);
4989 BGE_LOCK(sc);
4990 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4991 BGE_PCIMISCCTL_MASK_PCI_INTR);
4992 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4993 ifp->if_capenable |= IFCAP_POLLING;
4994 BGE_UNLOCK(sc);
4995 } else {
4996 error = ether_poll_deregister(ifp);
4997 /* Enable interrupt even in error case */
4998 BGE_LOCK(sc);
4999 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5000 BGE_PCIMISCCTL_MASK_PCI_INTR);
5001 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5002 ifp->if_capenable &= ~IFCAP_POLLING;
5003 BGE_UNLOCK(sc);
5004 }
5005 }
5006#endif
5007 if ((mask & IFCAP_TXCSUM) != 0 &&
5008 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5009 ifp->if_capenable ^= IFCAP_TXCSUM;
5010 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5011 ifp->if_hwassist |= sc->bge_csum_features;
5012 else
5013 ifp->if_hwassist &= ~sc->bge_csum_features;
5014 }
5015
5016 if ((mask & IFCAP_RXCSUM) != 0 &&
5017 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5018 ifp->if_capenable ^= IFCAP_RXCSUM;
5019
5020 if ((mask & IFCAP_TSO4) != 0 &&
5021 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5022 ifp->if_capenable ^= IFCAP_TSO4;
5023 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5024 ifp->if_hwassist |= CSUM_TSO;
5025 else
5026 ifp->if_hwassist &= ~CSUM_TSO;
5027 }
5028
5029 if (mask & IFCAP_VLAN_MTU) {
5030 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5031 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5032 bge_init(sc);
5033 }
5034
5035 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5036 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5037 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5038 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5039 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5040 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5041 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5042 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5043 BGE_LOCK(sc);
5044 bge_setvlan(sc);
5045 BGE_UNLOCK(sc);
5046 }
5047#ifdef VLAN_CAPABILITIES
5048 VLAN_CAPABILITIES(ifp);
5049#endif
5050 break;
5051 default:
5052 error = ether_ioctl(ifp, command, data);
5053 break;
5054 }
5055
5056 return (error);
5057}
5058
5059static void
5060bge_watchdog(struct bge_softc *sc)
5061{
5062 struct ifnet *ifp;
5063
5064 BGE_LOCK_ASSERT(sc);
5065
5066 if (sc->bge_timer == 0 || --sc->bge_timer)
5067 return;
5068
5069 ifp = sc->bge_ifp;
5070
5071 if_printf(ifp, "watchdog timeout -- resetting\n");
5072
5073 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5074 bge_init_locked(sc);
5075
5076 ifp->if_oerrors++;
5077}
5078
5079/*
5080 * Stop the adapter and free any mbufs allocated to the
5081 * RX and TX lists.
5082 */
5083static void
5084bge_stop(struct bge_softc *sc)
5085{
5086 struct ifnet *ifp;
5087
5088 BGE_LOCK_ASSERT(sc);
5089
5090 ifp = sc->bge_ifp;
5091
5092 callout_stop(&sc->bge_stat_ch);
5093
5094 /* Disable host interrupts. */
5095 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5096 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5097
5098 /*
5099 * Tell firmware we're shutting down.
5100 */
5101 bge_stop_fw(sc);
5102 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5103
5104 /*
5105 * Disable all of the receiver blocks.
5106 */
5107 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5108 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5109 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5110 if (!(BGE_IS_5705_PLUS(sc)))
5111 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5112 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5113 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5114 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5115
5116 /*
5117 * Disable all of the transmit blocks.
5118 */
5119 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5120 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5121 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5122 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5123 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5124 if (!(BGE_IS_5705_PLUS(sc)))
5125 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5126 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5127
5128 /*
5129 * Shut down all of the memory managers and related
5130 * state machines.
5131 */
5132 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5133 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5134 if (!(BGE_IS_5705_PLUS(sc)))
5135 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5136 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5137 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5138 if (!(BGE_IS_5705_PLUS(sc))) {
5139 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5140 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5141 }
5142 /* Update MAC statistics. */
5143 if (BGE_IS_5705_PLUS(sc))
5144 bge_stats_update_regs(sc);
5145
5146 bge_reset(sc);
5147 bge_sig_legacy(sc, BGE_RESET_STOP);
5148 bge_sig_post_reset(sc, BGE_RESET_STOP);
5149
5150 /*
5151 * Keep the ASF firmware running if up.
5152 */
5153 if (sc->bge_asf_mode & ASF_STACKUP)
5154 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5155 else
5156 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5157
5158 /* Free the RX lists. */
5159 bge_free_rx_ring_std(sc);
5160
5161 /* Free jumbo RX list. */
5162 if (BGE_IS_JUMBO_CAPABLE(sc))
5163 bge_free_rx_ring_jumbo(sc);
5164
5165 /* Free TX buffers. */
5166 bge_free_tx_ring(sc);
5167
5168 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5169
5170 /* Clear MAC's link state (PHY may still have link UP). */
5171 if (bootverbose && sc->bge_link)
5172 if_printf(sc->bge_ifp, "link DOWN\n");
5173 sc->bge_link = 0;
5174
5175 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5176}
5177
5178/*
5179 * Stop all chip I/O so that the kernel's probe routines don't
5180 * get confused by errant DMAs when rebooting.
5181 */
5182static int
5183bge_shutdown(device_t dev)
5184{
5185 struct bge_softc *sc;
5186
5187 sc = device_get_softc(dev);
5188 BGE_LOCK(sc);
5189 bge_stop(sc);
5190 bge_reset(sc);
5191 BGE_UNLOCK(sc);
5192
5193 return (0);
5194}
5195
5196static int
5197bge_suspend(device_t dev)
5198{
5199 struct bge_softc *sc;
5200
5201 sc = device_get_softc(dev);
5202 BGE_LOCK(sc);
5203 bge_stop(sc);
5204 BGE_UNLOCK(sc);
5205
5206 return (0);
5207}
5208
5209static int
5210bge_resume(device_t dev)
5211{
5212 struct bge_softc *sc;
5213 struct ifnet *ifp;
5214
5215 sc = device_get_softc(dev);
5216 BGE_LOCK(sc);
5217 ifp = sc->bge_ifp;
5218 if (ifp->if_flags & IFF_UP) {
5219 bge_init_locked(sc);
5220 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5221 bge_start_locked(ifp);
5222 }
5223 BGE_UNLOCK(sc);
5224
5225 return (0);
5226}
5227
5228static void
5229bge_link_upd(struct bge_softc *sc)
5230{
5231 struct mii_data *mii;
5232 uint32_t link, status;
5233
5234 BGE_LOCK_ASSERT(sc);
5235
5236 /* Clear 'pending link event' flag. */
5237 sc->bge_link_evt = 0;
5238
5239 /*
5240 * Process link state changes.
5241 * Grrr. The link status word in the status block does
5242 * not work correctly on the BCM5700 rev AX and BX chips,
5243 * according to all available information. Hence, we have
5244 * to enable MII interrupts in order to properly obtain
5245 * async link changes. Unfortunately, this also means that
5246 * we have to read the MAC status register to detect link
5247 * changes, thereby adding an additional register access to
5248 * the interrupt handler.
5249 *
 5250	 * XXX: perhaps the link state detection procedure used for
 5251	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5252 */
5253
5254 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5255 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5256 status = CSR_READ_4(sc, BGE_MAC_STS);
5257 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5258 mii = device_get_softc(sc->bge_miibus);
5259 mii_pollstat(mii);
5260 if (!sc->bge_link &&
5261 mii->mii_media_status & IFM_ACTIVE &&
5262 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5263 sc->bge_link++;
5264 if (bootverbose)
5265 if_printf(sc->bge_ifp, "link UP\n");
5266 } else if (sc->bge_link &&
5267 (!(mii->mii_media_status & IFM_ACTIVE) ||
5268 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5269 sc->bge_link = 0;
5270 if (bootverbose)
5271 if_printf(sc->bge_ifp, "link DOWN\n");
5272 }
5273
5274 /* Clear the interrupt. */
5275 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5276 BGE_EVTENB_MI_INTERRUPT);
5277 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5278 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5279 BRGPHY_INTRS);
5280 }
5281 return;
5282 }
5283
5284 if (sc->bge_flags & BGE_FLAG_TBI) {
5285 status = CSR_READ_4(sc, BGE_MAC_STS);
5286 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5287 if (!sc->bge_link) {
5288 sc->bge_link++;
5289 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5290 BGE_CLRBIT(sc, BGE_MAC_MODE,
5291 BGE_MACMODE_TBI_SEND_CFGS);
5292 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5293 if (bootverbose)
5294 if_printf(sc->bge_ifp, "link UP\n");
5295 if_link_state_change(sc->bge_ifp,
5296 LINK_STATE_UP);
5297 }
5298 } else if (sc->bge_link) {
5299 sc->bge_link = 0;
5300 if (bootverbose)
5301 if_printf(sc->bge_ifp, "link DOWN\n");
5302 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5303 }
5304 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5305 /*
 5306		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
 5307		 * bit in the status word always set. Work around this bug by
 5308		 * reading the PHY link status directly.
5309 */
5310 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5311
5312 if (link != sc->bge_link ||
5313 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5314 mii = device_get_softc(sc->bge_miibus);
5315 mii_pollstat(mii);
5316 if (!sc->bge_link &&
5317 mii->mii_media_status & IFM_ACTIVE &&
5318 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5319 sc->bge_link++;
5320 if (bootverbose)
5321 if_printf(sc->bge_ifp, "link UP\n");
5322 } else if (sc->bge_link &&
5323 (!(mii->mii_media_status & IFM_ACTIVE) ||
5324 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5325 sc->bge_link = 0;
5326 if (bootverbose)
5327 if_printf(sc->bge_ifp, "link DOWN\n");
5328 }
5329 }
5330 } else {
5331 /*
5332 * For controllers that call mii_tick, we have to poll
5333 * link status.
5334 */
5335 mii = device_get_softc(sc->bge_miibus);
5336 mii_pollstat(mii);
5337 bge_miibus_statchg(sc->bge_dev);
5338 }
5339
5340 /* Clear the attention. */
5341 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5342 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5343 BGE_MACSTAT_LINK_CHANGED);
5344}
5345
5346static void
5347bge_add_sysctls(struct bge_softc *sc)
5348{
5349 struct sysctl_ctx_list *ctx;
5350 struct sysctl_oid_list *children;
5351 char tn[32];
5352 int unit;
5353
5354 ctx = device_get_sysctl_ctx(sc->bge_dev);
5355 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5356
5357#ifdef BGE_REGISTER_DEBUG
5358 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5359 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5360 "Debug Information");
5361
5362 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5363 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5364 "Register Read");
5365
5366 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5367 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5368 "Memory Read");
5369
5370#endif
5371
5372 unit = device_get_unit(sc->bge_dev);
5373 /*
5374 * A common design characteristic for many Broadcom client controllers
5375 * is that they only support a single outstanding DMA read operation
5376 * on the PCIe bus. This means that it will take twice as long to fetch
5377 * a TX frame that is split into header and payload buffers as it does
5378 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5379 * these controllers, coalescing buffers to reduce the number of memory
 5380	 * reads is an effective way to get maximum performance (about 940 Mbps).
 5381	 * Without collapsing TX buffers the maximum TCP bulk transfer
 5382	 * performance is about 850 Mbps. However, forcibly coalescing mbufs
 5383	 * consumes a lot of CPU cycles, so leave it off by default.
5384 */
5385 sc->bge_forced_collapse = 0;
5386 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5387 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5388 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5389 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5390 "Number of fragmented TX buffers of a frame allowed before "
5391 "forced collapsing");
5392
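	/*
	 * Editor's note -- usage sketch, not in the original source: the
	 * tunable/sysctl pairs created here can be set at boot through
	 * /boot/loader.conf or at runtime with sysctl(8), e.g.
	 *
	 *	dev.bge.0.forced_collapse="2"		# loader.conf
	 *	sysctl dev.bge.0.forced_collapse=2	# runtime
	 *
	 * The same pattern applies to forced_udpcsum below.
	 */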
5393 /*
5394 * It seems all Broadcom controllers have a bug that can generate UDP
5395 * datagrams with checksum value 0 when TX UDP checksum offloading is
 5396	 * enabled. Generating a UDP checksum value of 0 violates RFC 768.
 5397	 * Even though the probability of generating such UDP datagrams is
 5398	 * low, I don't want FreeBSD boxes to inject such datagrams into
 5399	 * the network, so disable UDP checksum offloading by default. Users
 5400	 * can still override this behavior by setting the sysctl variable
5401 * dev.bge.0.forced_udpcsum.
5402 */
5403 sc->bge_forced_udpcsum = 0;
 5404	snprintf(tn, sizeof(tn), "dev.bge.%d.forced_udpcsum", unit);
5405 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5406 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5407 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5408 "Enable UDP checksum offloading even if controller can "
5409 "generate UDP checksum value 0");
5410
5411 if (BGE_IS_5705_PLUS(sc))
5412 bge_add_sysctl_stats_regs(sc, ctx, children);
5413 else
5414 bge_add_sysctl_stats(sc, ctx, children);
5415}
5416
5417#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5418 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5419 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5420 desc)
5421
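/*
 * Editor's note -- illustrative, standalone sketch, not part of if_bge.c:
 * BGE_SYSCTL_STAT above passes offsetof(struct bge_stats, node) as the
 * handler's arg2 so each sysctl knows where its counter sits inside the
 * statistics block.  The example below shows the same offsetof() idea on
 * a made-up structure.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ex_stats {
	uint64_t frames_dropped;		/* hypothetical counters */
	uint64_t dma_write_queue_full;
};

int
main(void)
{
	/* The second 64-bit counter starts 8 bytes into the structure. */
	printf("offset = %zu\n",
	    offsetof(struct ex_stats, dma_write_queue_full));
	return (0);
}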
5422static void
5423bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5424 struct sysctl_oid_list *parent)
5425{
5426 struct sysctl_oid *tree;
5427 struct sysctl_oid_list *children, *schildren;
5428
5429 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5430 NULL, "BGE Statistics");
5431 schildren = children = SYSCTL_CHILDREN(tree);
5432 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5433 children, COSFramesDroppedDueToFilters,
5434 "FramesDroppedDueToFilters");
5435 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5436 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5437 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5438 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5439 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5440 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5441 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5442 children, ifInDiscards, "InputDiscards");
5443 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5444 children, ifInErrors, "InputErrors");
5445 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5446 children, nicRecvThresholdHit, "RecvThresholdHit");
5447 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5448 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5449 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5450 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5451 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5452 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5453 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5454 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5455 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5456 children, nicRingStatusUpdate, "RingStatusUpdate");
5457 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5458 children, nicInterrupts, "Interrupts");
5459 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5460 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5461 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5462 children, nicSendThresholdHit, "SendThresholdHit");
5463
5464 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5465 NULL, "BGE RX Statistics");
5466 children = SYSCTL_CHILDREN(tree);
5467 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5468 children, rxstats.ifHCInOctets, "ifHCInOctets");
5469 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5470 children, rxstats.etherStatsFragments, "Fragments");
5471 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5472 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5473 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5474 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5475 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5476 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5477 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5478 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5479 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5480 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5481 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5482 children, rxstats.xoffPauseFramesReceived,
5483 "xoffPauseFramesReceived");
5484 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5485 children, rxstats.macControlFramesReceived,
5486 "ControlFramesReceived");
5487 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5488 children, rxstats.xoffStateEntered, "xoffStateEntered");
5489 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5490 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5491 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5492 children, rxstats.etherStatsJabbers, "Jabbers");
5493 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5494 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5495 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5496 children, rxstats.inRangeLengthError, "inRangeLengthError");
5497 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5498 children, rxstats.outRangeLengthError, "outRangeLengthError");
5499
5500 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5501 NULL, "BGE TX Statistics");
5502 children = SYSCTL_CHILDREN(tree);
5503 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5504 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5505 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5506 children, txstats.etherStatsCollisions, "Collisions");
5507 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5508 children, txstats.outXonSent, "XonSent");
5509 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5510 children, txstats.outXoffSent, "XoffSent");
5511 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5512 children, txstats.flowControlDone, "flowControlDone");
5513 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5514 children, txstats.dot3StatsInternalMacTransmitErrors,
5515 "InternalMacTransmitErrors");
5516 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5517 children, txstats.dot3StatsSingleCollisionFrames,
5518 "SingleCollisionFrames");
5519 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5520 children, txstats.dot3StatsMultipleCollisionFrames,
5521 "MultipleCollisionFrames");
5522 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5523 children, txstats.dot3StatsDeferredTransmissions,
5524 "DeferredTransmissions");
5525 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5526 children, txstats.dot3StatsExcessiveCollisions,
5527 "ExcessiveCollisions");
5528 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5529 children, txstats.dot3StatsLateCollisions,
5530 "LateCollisions");
5531 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5532 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5533 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5534 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5535 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5536 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5537 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5538 children, txstats.dot3StatsCarrierSenseErrors,
5539 "CarrierSenseErrors");
5540 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5541 children, txstats.ifOutDiscards, "Discards");
5542 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5543 children, txstats.ifOutErrors, "Errors");
5544}
5545
5546#undef BGE_SYSCTL_STAT
5547
5548#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5549 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5550
5551static void
5552bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5553 struct sysctl_oid_list *parent)
5554{
5555 struct sysctl_oid *tree;
5556 struct sysctl_oid_list *child, *schild;
5557 struct bge_mac_stats *stats;
5558
5559 stats = &sc->bge_mac_stats;
5560 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5561 NULL, "BGE Statistics");
5562 schild = child = SYSCTL_CHILDREN(tree);
5563 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5564 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5565 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5566 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5567 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5568 &stats->DmaWriteHighPriQueueFull,
5569 "NIC DMA Write High Priority Queue Full");
5570 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5571 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5572 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5573 &stats->InputDiscards, "Discarded Input Frames");
5574 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5575 &stats->InputErrors, "Input Errors");
5576 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5577 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5578
5579 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5580 NULL, "BGE RX Statistics");
5581 child = SYSCTL_CHILDREN(tree);
5582 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5583 &stats->ifHCInOctets, "Inbound Octets");
5584 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5585 &stats->etherStatsFragments, "Fragments");
5586 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5587 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5588 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5589 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5590 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5591 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5592 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5593 &stats->dot3StatsFCSErrors, "FCS Errors");
5594 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5595 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5596 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5597 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5598 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5599 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5600 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5601 &stats->macControlFramesReceived, "MAC Control Frames Received");
5602 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5603 &stats->xoffStateEntered, "XOFF State Entered");
5604 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5605 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5606 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5607 &stats->etherStatsJabbers, "Jabbers");
5608 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5609 &stats->etherStatsUndersizePkts, "Undersized Packets");
5610
5611 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5612 NULL, "BGE TX Statistics");
5613 child = SYSCTL_CHILDREN(tree);
5614 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5615 &stats->ifHCOutOctets, "Outbound Octets");
5616 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5617 &stats->etherStatsCollisions, "TX Collisions");
5618 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5619 &stats->outXonSent, "XON Sent");
5620 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5621 &stats->outXoffSent, "XOFF Sent");
5622 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5623 &stats->dot3StatsInternalMacTransmitErrors,
5624 "Internal MAC TX Errors");
5625 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5626 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5627 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5628 &stats->dot3StatsMultipleCollisionFrames,
5629 "Multiple Collision Frames");
5630 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5631 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5632 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5633 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5634 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5635 &stats->dot3StatsLateCollisions, "Late Collisions");
5636 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5637 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5638 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5639 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5640 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5641 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5642}
5643
5644#undef BGE_SYSCTL_STAT_ADD64
5645
5646static int
5647bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5648{
5649 struct bge_softc *sc;
5650 uint32_t result;
5651 int offset;
5652
5653 sc = (struct bge_softc *)arg1;
5654 offset = arg2;
5655 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5656 offsetof(bge_hostaddr, bge_addr_lo));
5657 return (sysctl_handle_int(oidp, &result, 0, req));
5658}
5659
5660#ifdef BGE_REGISTER_DEBUG
5661static int
5662bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5663{
5664 struct bge_softc *sc;
5665 uint16_t *sbdata;
5666 int error;
5667 int result;
5668 int i, j;
5669
5670 result = -1;
5671 error = sysctl_handle_int(oidp, &result, 0, req);
5672 if (error || (req->newptr == NULL))
5673 return (error);
5674
5675 if (result == 1) {
5676 sc = (struct bge_softc *)arg1;
5677
5678 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5679 printf("Status Block:\n");
5680 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
5681 printf("%06x:", i);
5682 for (j = 0; j < 8; j++) {
5683 printf(" %04x", sbdata[i]);
5684 i += 4;
5685 }
5686 printf("\n");
5687 }
5688
5689 printf("Registers:\n");
5690 for (i = 0x800; i < 0xA00; ) {
5691 printf("%06x:", i);
5692 for (j = 0; j < 8; j++) {
5693 printf(" %08x", CSR_READ_4(sc, i));
5694 i += 4;
5695 }
5696 printf("\n");
5697 }
5698
5699 printf("Hardware Flags:\n");
5700 if (BGE_IS_5755_PLUS(sc))
5701 printf(" - 5755 Plus\n");
5702 if (BGE_IS_575X_PLUS(sc))
5703 printf(" - 575X Plus\n");
5704 if (BGE_IS_5705_PLUS(sc))
5705 printf(" - 5705 Plus\n");
5706 if (BGE_IS_5714_FAMILY(sc))
5707 printf(" - 5714 Family\n");
5708 if (BGE_IS_5700_FAMILY(sc))
5709 printf(" - 5700 Family\n");
5710 if (sc->bge_flags & BGE_FLAG_JUMBO)
5711 printf(" - Supports Jumbo Frames\n");
5712 if (sc->bge_flags & BGE_FLAG_PCIX)
5713 printf(" - PCI-X Bus\n");
5714 if (sc->bge_flags & BGE_FLAG_PCIE)
5715 printf(" - PCI Express Bus\n");
5716 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5717 printf(" - No 3 LEDs\n");
5718 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5719 printf(" - RX Alignment Bug\n");
5720 }
5721
5722 return (error);
5723}
5724
5725static int
5726bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5727{
5728 struct bge_softc *sc;
5729 int error;
5730 uint16_t result;
5731 uint32_t val;
5732
5733 result = -1;
5734 error = sysctl_handle_int(oidp, &result, 0, req);
5735 if (error || (req->newptr == NULL))
5736 return (error);
5737
5738 if (result < 0x8000) {
5739 sc = (struct bge_softc *)arg1;
5740 val = CSR_READ_4(sc, result);
5741 printf("reg 0x%06X = 0x%08X\n", result, val);
5742 }
5743
5744 return (error);
5745}
5746
5747static int
5748bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5749{
5750 struct bge_softc *sc;
5751 int error;
5752 uint16_t result;
5753 uint32_t val;
5754
5755 result = -1;
5756 error = sysctl_handle_int(oidp, &result, 0, req);
5757 if (error || (req->newptr == NULL))
5758 return (error);
5759
5760 if (result < 0x8000) {
5761 sc = (struct bge_softc *)arg1;
5762 val = bge_readmem_ind(sc, result);
5763 printf("mem 0x%06X = 0x%08X\n", result, val);
5764 }
5765
5766 return (error);
5767}
5768#endif
5769
5770static int
5771bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5772{
5773
5774 if (sc->bge_flags & BGE_FLAG_EADDR)
5775 return (1);
5776
5777#ifdef __sparc64__
5778 OF_getetheraddr(sc->bge_dev, ether_addr);
5779 return (0);
5780#endif
5781 return (1);
5782}
5783
5784static int
5785bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5786{
5787 uint32_t mac_addr;
5788
5789 mac_addr = bge_readmem_ind(sc, 0x0c14);
5790 if ((mac_addr >> 16) == 0x484b) {
5791 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5792 ether_addr[1] = (uint8_t)mac_addr;
5793 mac_addr = bge_readmem_ind(sc, 0x0c18);
5794 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5795 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5796 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5797 ether_addr[5] = (uint8_t)mac_addr;
5798 return (0);
5799 }
5800 return (1);
5801}
5802
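/*
 * Editor's note -- illustrative, standalone sketch, not part of if_bge.c:
 * bge_get_eaddr_mem() above recognizes the station address by the 0x484b
 * ("HK") signature in the upper half of the first word and then unpacks
 * the six address bytes from the two 32-bit words.  The program below
 * repeats that unpacking on made-up word values.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t w0 = 0x484b0010, w1 = 0x18a2b3c4;	/* hypothetical contents */
	uint8_t ea[6];

	if ((w0 >> 16) == 0x484b) {			/* 'H' 'K' signature */
		ea[0] = (uint8_t)(w0 >> 8);
		ea[1] = (uint8_t)w0;
		ea[2] = (uint8_t)(w1 >> 24);
		ea[3] = (uint8_t)(w1 >> 16);
		ea[4] = (uint8_t)(w1 >> 8);
		ea[5] = (uint8_t)w1;
		/* Prints 00:10:18:a2:b3:c4 for the values above. */
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		    ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]);
	}
	return (0);
}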
5803static int
5804bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5805{
5806 int mac_offset = BGE_EE_MAC_OFFSET;
5807
5808 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5809 mac_offset = BGE_EE_MAC_OFFSET_5906;
5810
5811 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5812 ETHER_ADDR_LEN));
5813}
5814
5815static int
5816bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5817{
5818
5819 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5820 return (1);
5821
5822 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5823 ETHER_ADDR_LEN));
5824}
5825
5826static int
5827bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5828{
5829 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5830 /* NOTE: Order is critical */
5831 bge_get_eaddr_fw,
5832 bge_get_eaddr_mem,
5833 bge_get_eaddr_nvram,
5834 bge_get_eaddr_eeprom,
5835 NULL
5836 };
5837 const bge_eaddr_fcn_t *func;
5838
5839 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5840 if ((*func)(sc, eaddr) == 0)
5841 break;
5842 }
5843 return (*func == NULL ? ENXIO : 0);
5844}