if_bge.c (219902) if_bge.c (220368)
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 219902 2011-03-23 13:10:15Z jhb $");
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 220368 2011-04-05 17:41:54Z yongari $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
 136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} const bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
220
221 { SK_VENDORID, SK_DEVICEID_ALTIMA },
222
223 { TC_VENDORID, TC_DEVICEID_3C996 },
224
225 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
226 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
227 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
228
229 { 0, 0 }
230};
231
232static const struct bge_vendor {
233 uint16_t v_id;
234 const char *v_name;
235} const bge_vendors[] = {
236 { ALTEON_VENDORID, "Alteon" },
237 { ALTIMA_VENDORID, "Altima" },
238 { APPLE_VENDORID, "Apple" },
239 { BCOM_VENDORID, "Broadcom" },
240 { SK_VENDORID, "SysKonnect" },
241 { TC_VENDORID, "3Com" },
242 { FJTSU_VENDORID, "Fujitsu" },
243
244 { 0, NULL }
245};
246
247static const struct bge_revision {
248 uint32_t br_chipid;
249 const char *br_name;
250} const bge_revisions[] = {
251 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
252 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
253 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
254 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
255 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
256 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
257 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
258 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
259 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
260 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
261 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
262 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
263 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
264 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
265 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
266 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
267 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
268 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
269 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
270 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
271 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
272 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
273 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
274 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
275 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
276 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
277 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
278 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
279 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
280 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
281 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
282 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
283 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
284 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
285 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
286 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
287 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
288 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
289 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
290 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
291 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
292 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
293 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
294 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
295 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
296 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
297 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
298 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
299 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
300 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
301 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
302 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
303 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
304 /* 5754 and 5787 share the same ASIC ID */
305 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
306 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
307 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
308 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
309 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
310 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
311 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
312
313 { 0, NULL }
314};
315
316/*
317 * Some defaults for major revisions, so that newer steppings
318 * that we don't know about have a shot at working.
319 */
320static const struct bge_revision const bge_majorrevs[] = {
321 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
322 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
323 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
324 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
325 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
326 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
327 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
328 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
329 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
330 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
331 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
332 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
333 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
334 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
335 /* 5754 and 5787 share the same ASIC ID */
336 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
337 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
338 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
339 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
340
341 { 0, NULL }
342};
343
344#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
345#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
346#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
347#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
348#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
349#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
350#define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
351
352const struct bge_revision * bge_lookup_rev(uint32_t);
353const struct bge_vendor * bge_lookup_vendor(uint16_t);
354
355typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
356
357static int bge_probe(device_t);
358static int bge_attach(device_t);
359static int bge_detach(device_t);
360static int bge_suspend(device_t);
361static int bge_resume(device_t);
362static void bge_release_resources(struct bge_softc *);
363static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
364static int bge_dma_alloc(struct bge_softc *);
365static void bge_dma_free(struct bge_softc *);
366static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
367 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
368
369static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
370static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
371static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
372static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
373static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
374
375static void bge_txeof(struct bge_softc *, uint16_t);
376static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
377static int bge_rxeof(struct bge_softc *, uint16_t, int);
378
379static void bge_asf_driver_up (struct bge_softc *);
380static void bge_tick(void *);
381static void bge_stats_clear_regs(struct bge_softc *);
382static void bge_stats_update(struct bge_softc *);
383static void bge_stats_update_regs(struct bge_softc *);
384static struct mbuf *bge_check_short_dma(struct mbuf *);
385static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
386 uint16_t *, uint16_t *);
387static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
388
389static void bge_intr(void *);
390static int bge_msi_intr(void *);
391static void bge_intr_task(void *, int);
392static void bge_start_locked(struct ifnet *);
393static void bge_start(struct ifnet *);
394static int bge_ioctl(struct ifnet *, u_long, caddr_t);
395static void bge_init_locked(struct bge_softc *);
396static void bge_init(void *);
397static void bge_stop(struct bge_softc *);
398static void bge_watchdog(struct bge_softc *);
399static int bge_shutdown(device_t);
400static int bge_ifmedia_upd_locked(struct ifnet *);
401static int bge_ifmedia_upd(struct ifnet *);
402static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
403
404static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
405static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
406
407static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
408static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
409
410static void bge_setpromisc(struct bge_softc *);
411static void bge_setmulti(struct bge_softc *);
412static void bge_setvlan(struct bge_softc *);
413
414static __inline void bge_rxreuse_std(struct bge_softc *, int);
415static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
416static int bge_newbuf_std(struct bge_softc *, int);
417static int bge_newbuf_jumbo(struct bge_softc *, int);
418static int bge_init_rx_ring_std(struct bge_softc *);
419static void bge_free_rx_ring_std(struct bge_softc *);
420static int bge_init_rx_ring_jumbo(struct bge_softc *);
421static void bge_free_rx_ring_jumbo(struct bge_softc *);
422static void bge_free_tx_ring(struct bge_softc *);
423static int bge_init_tx_ring(struct bge_softc *);
424
425static int bge_chipinit(struct bge_softc *);
426static int bge_blockinit(struct bge_softc *);
427
428static int bge_has_eaddr(struct bge_softc *);
429static uint32_t bge_readmem_ind(struct bge_softc *, int);
430static void bge_writemem_ind(struct bge_softc *, int, int);
431static void bge_writembx(struct bge_softc *, int, int);
432#ifdef notdef
433static uint32_t bge_readreg_ind(struct bge_softc *, int);
434#endif
435static void bge_writemem_direct(struct bge_softc *, int, int);
436static void bge_writereg_ind(struct bge_softc *, int, int);
437
438static int bge_miibus_readreg(device_t, int, int);
439static int bge_miibus_writereg(device_t, int, int, int);
440static void bge_miibus_statchg(device_t);
441#ifdef DEVICE_POLLING
442static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
443#endif
444
445#define BGE_RESET_START 1
446#define BGE_RESET_STOP 2
447static void bge_sig_post_reset(struct bge_softc *, int);
448static void bge_sig_legacy(struct bge_softc *, int);
449static void bge_sig_pre_reset(struct bge_softc *, int);
450static void bge_stop_fw(struct bge_softc *);
451static int bge_reset(struct bge_softc *);
452static void bge_link_upd(struct bge_softc *);
453
454/*
455 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
456 * leak information to untrusted users. It is also known to cause alignment
457 * traps on certain architectures.
458 */
459#ifdef BGE_REGISTER_DEBUG
460static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
461static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
462static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
463#endif
464static void bge_add_sysctls(struct bge_softc *);
465static void bge_add_sysctl_stats_regs(struct bge_softc *,
466 struct sysctl_ctx_list *, struct sysctl_oid_list *);
467static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
468 struct sysctl_oid_list *);
469static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
470
471static device_method_t bge_methods[] = {
472 /* Device interface */
473 DEVMETHOD(device_probe, bge_probe),
474 DEVMETHOD(device_attach, bge_attach),
475 DEVMETHOD(device_detach, bge_detach),
476 DEVMETHOD(device_shutdown, bge_shutdown),
477 DEVMETHOD(device_suspend, bge_suspend),
478 DEVMETHOD(device_resume, bge_resume),
479
480 /* bus interface */
481 DEVMETHOD(bus_print_child, bus_generic_print_child),
482 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
483
484 /* MII interface */
485 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
486 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
487 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
488
489 { 0, 0 }
490};
491
492static driver_t bge_driver = {
493 "bge",
494 bge_methods,
495 sizeof(struct bge_softc)
496};
497
498static devclass_t bge_devclass;
499
500DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
501DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
502
503static int bge_allow_asf = 1;
504
505TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
506
507SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
508SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
509 "Allow ASF mode if available");
510
511#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
512#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
513#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
514#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
515#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
516
517static int
518bge_has_eaddr(struct bge_softc *sc)
519{
520#ifdef __sparc64__
521 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
522 device_t dev;
523 uint32_t subvendor;
524
525 dev = sc->bge_dev;
526
527 /*
528 * The on-board BGEs found in sun4u machines aren't fitted with
529 * an EEPROM which means that we have to obtain the MAC address
530 * via OFW and that some tests will always fail. We distinguish
531 * such BGEs by the subvendor ID, which also has to be obtained
532 * from OFW instead of the PCI configuration space as the latter
533 * indicates Broadcom as the subvendor of the netboot interface.
534 * For early Blade 1500 and 2500 we even have to check the OFW
535 * device path as the subvendor ID always defaults to Broadcom
536 * there.
537 */
538 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
539 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
540 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
541 return (0);
542 memset(buf, 0, sizeof(buf));
543 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
544 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
545 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
546 return (0);
547 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
548 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
549 return (0);
550 }
551#endif
552 return (1);
553}
554
555static uint32_t
556bge_readmem_ind(struct bge_softc *sc, int off)
557{
558 device_t dev;
559 uint32_t val;
560
561 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
562 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
563 return (0);
564
565 dev = sc->bge_dev;
566
567 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
568 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
569 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
570 return (val);
571}
572
573static void
574bge_writemem_ind(struct bge_softc *sc, int off, int val)
575{
576 device_t dev;
577
578 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
579 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
580 return;
581
582 dev = sc->bge_dev;
583
584 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
585 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
586 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
587}
588
589#ifdef notdef
590static uint32_t
591bge_readreg_ind(struct bge_softc *sc, int off)
592{
593 device_t dev;
594
595 dev = sc->bge_dev;
596
597 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
598 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
599}
600#endif
601
602static void
603bge_writereg_ind(struct bge_softc *sc, int off, int val)
604{
605 device_t dev;
606
607 dev = sc->bge_dev;
608
609 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
610 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
611}
612
613static void
614bge_writemem_direct(struct bge_softc *sc, int off, int val)
615{
616 CSR_WRITE_4(sc, off, val);
617}
618
619static void
620bge_writembx(struct bge_softc *sc, int off, int val)
621{
622 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
623 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
624
625 CSR_WRITE_4(sc, off, val);
626}
627
628/*
629 * Map a single buffer address.
630 */
631
632static void
633bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
634{
635 struct bge_dmamap_arg *ctx;
636
637 if (error)
638 return;
639
640 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
641
642 ctx = arg;
643 ctx->bge_busaddr = segs->ds_addr;
644}
645
646static uint8_t
647bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
648{
649 uint32_t access, byte = 0;
650 int i;
651
652 /* Lock. */
653 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
654 for (i = 0; i < 8000; i++) {
655 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
656 break;
657 DELAY(20);
658 }
659 if (i == 8000)
660 return (1);
661
662 /* Enable access. */
663 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
664 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
665
666 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
667 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
668 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
669 DELAY(10);
670 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
671 DELAY(10);
672 break;
673 }
674 }
675
676 if (i == BGE_TIMEOUT * 10) {
677 if_printf(sc->bge_ifp, "nvram read timed out\n");
678 return (1);
679 }
680
681 /* Get result. */
682 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
683
684 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
685
686 /* Disable access. */
687 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
688
689 /* Unlock. */
690 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
691 CSR_READ_4(sc, BGE_NVRAM_SWARB);
692
693 return (0);
694}
695
696/*
697 * Read a sequence of bytes from NVRAM.
698 */
699static int
700bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
701{
702 int err = 0, i;
703 uint8_t byte = 0;
704
705 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
706 return (1);
707
708 for (i = 0; i < cnt; i++) {
709 err = bge_nvram_getbyte(sc, off + i, &byte);
710 if (err)
711 break;
712 *(dest + i) = byte;
713 }
714
715 return (err ? 1 : 0);
716}
717
718/*
719 * Read a byte of data stored in the EEPROM at address 'addr.' The
720 * BCM570x supports both the traditional bitbang interface and an
721 * auto access interface for reading the EEPROM. We use the auto
722 * access method.
723 */
724static uint8_t
725bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
726{
727 int i;
728 uint32_t byte = 0;
729
730 /*
731 * Enable use of auto EEPROM access so we can avoid
732 * having to use the bitbang method.
733 */
734 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
735
736 /* Reset the EEPROM, load the clock period. */
737 CSR_WRITE_4(sc, BGE_EE_ADDR,
738 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
739 DELAY(20);
740
741 /* Issue the read EEPROM command. */
742 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
743
744 /* Wait for completion */
745 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
746 DELAY(10);
747 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
748 break;
749 }
750
751 if (i == BGE_TIMEOUT * 10) {
752 device_printf(sc->bge_dev, "EEPROM read timed out\n");
753 return (1);
754 }
755
756 /* Get result. */
757 byte = CSR_READ_4(sc, BGE_EE_DATA);
758
759 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
760
761 return (0);
762}
763
764/*
765 * Read a sequence of bytes from the EEPROM.
766 */
767static int
768bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
769{
770 int i, error = 0;
771 uint8_t byte = 0;
772
773 for (i = 0; i < cnt; i++) {
774 error = bge_eeprom_getbyte(sc, off + i, &byte);
775 if (error)
776 break;
777 *(dest + i) = byte;
778 }
779
780 return (error ? 1 : 0);
781}
782
783static int
784bge_miibus_readreg(device_t dev, int phy, int reg)
785{
786 struct bge_softc *sc;
787 uint32_t val;
788 int i;
789
790 sc = device_get_softc(dev);
791
792 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
793 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
794 CSR_WRITE_4(sc, BGE_MI_MODE,
795 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
796 DELAY(80);
797 }
798
799 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
800 BGE_MIPHY(phy) | BGE_MIREG(reg));
801
802 /* Poll for the PHY register access to complete. */
803 for (i = 0; i < BGE_TIMEOUT; i++) {
804 DELAY(10);
805 val = CSR_READ_4(sc, BGE_MI_COMM);
806 if ((val & BGE_MICOMM_BUSY) == 0) {
807 DELAY(5);
808 val = CSR_READ_4(sc, BGE_MI_COMM);
809 break;
810 }
811 }
812
813 if (i == BGE_TIMEOUT) {
814 device_printf(sc->bge_dev,
815 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
816 phy, reg, val);
817 val = 0;
818 }
819
820 /* Restore the autopoll bit if necessary. */
821 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
822 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
823 DELAY(80);
824 }
825
826 if (val & BGE_MICOMM_READFAIL)
827 return (0);
828
829 return (val & 0xFFFF);
830}
831
832static int
833bge_miibus_writereg(device_t dev, int phy, int reg, int val)
834{
835 struct bge_softc *sc;
836 int i;
837
838 sc = device_get_softc(dev);
839
840 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
841 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
842 return (0);
843
844 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
845 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
846 CSR_WRITE_4(sc, BGE_MI_MODE,
847 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
848 DELAY(80);
849 }
850
851 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
852 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
853
854 for (i = 0; i < BGE_TIMEOUT; i++) {
855 DELAY(10);
856 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
857 DELAY(5);
858 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
859 break;
860 }
861 }
862
863 /* Restore the autopoll bit if necessary. */
864 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
865 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
866 DELAY(80);
867 }
868
869 if (i == BGE_TIMEOUT)
870 device_printf(sc->bge_dev,
871 "PHY write timed out (phy %d, reg %d, val %d)\n",
872 phy, reg, val);
873
874 return (0);
875}
876
877static void
878bge_miibus_statchg(device_t dev)
879{
880 struct bge_softc *sc;
881 struct mii_data *mii;
882 sc = device_get_softc(dev);
883 mii = device_get_softc(sc->bge_miibus);
884
885 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
886 (IFM_ACTIVE | IFM_AVALID)) {
887 switch (IFM_SUBTYPE(mii->mii_media_active)) {
888 case IFM_10_T:
889 case IFM_100_TX:
890 sc->bge_link = 1;
891 break;
892 case IFM_1000_T:
893 case IFM_1000_SX:
894 case IFM_2500_SX:
895 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
896 sc->bge_link = 1;
897 else
898 sc->bge_link = 0;
899 break;
900 default:
901 sc->bge_link = 0;
902 break;
903 }
904 } else
905 sc->bge_link = 0;
906 if (sc->bge_link == 0)
907 return;
908 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
909 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
910 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
911 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
912 else
913 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
914
 915 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
916 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
917 if ((IFM_OPTIONS(mii->mii_media_active) &
918 IFM_ETH_TXPAUSE) != 0)
919 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
920 else
921 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
922 if ((IFM_OPTIONS(mii->mii_media_active) &
923 IFM_ETH_RXPAUSE) != 0)
924 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
925 else
926 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
927 } else {
928 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
929 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
930 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
931 }
932}
933
934/*
 935 * Initialize a standard receive ring descriptor.
936 */
937static int
938bge_newbuf_std(struct bge_softc *sc, int i)
939{
940 struct mbuf *m;
941 struct bge_rx_bd *r;
942 bus_dma_segment_t segs[1];
943 bus_dmamap_t map;
944 int error, nsegs;
945
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
136 * ID burned into it, though it will always be overriden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} const bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
220
221 { SK_VENDORID, SK_DEVICEID_ALTIMA },
222
223 { TC_VENDORID, TC_DEVICEID_3C996 },
224
225 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
226 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
227 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
228
229 { 0, 0 }
230};
231
232static const struct bge_vendor {
233 uint16_t v_id;
234 const char *v_name;
235} const bge_vendors[] = {
236 { ALTEON_VENDORID, "Alteon" },
237 { ALTIMA_VENDORID, "Altima" },
238 { APPLE_VENDORID, "Apple" },
239 { BCOM_VENDORID, "Broadcom" },
240 { SK_VENDORID, "SysKonnect" },
241 { TC_VENDORID, "3Com" },
242 { FJTSU_VENDORID, "Fujitsu" },
243
244 { 0, NULL }
245};
246
247static const struct bge_revision {
248 uint32_t br_chipid;
249 const char *br_name;
250} const bge_revisions[] = {
251 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
252 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
253 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
254 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
255 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
256 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
257 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
258 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
259 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
260 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
261 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
262 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
263 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
264 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
265 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
266 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
267 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
268 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
269 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
270 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
271 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
272 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
273 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
274 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
275 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
276 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
277 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
278 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
279 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
280 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
281 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
282 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
283 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
284 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
285 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
286 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
287 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
288 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
289 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
290 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
291 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
292 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
293 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
294 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
295 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
296 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
297 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
298 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
299 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
300 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
301 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
302 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
303 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
304 /* 5754 and 5787 share the same ASIC ID */
305 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
306 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
307 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
308 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
309 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
310 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
311 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
312
313 { 0, NULL }
314};
315
316/*
317 * Some defaults for major revisions, so that newer steppings
318 * that we don't know about have a shot at working.
319 */
320static const struct bge_revision const bge_majorrevs[] = {
321 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
322 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
323 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
324 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
325 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
326 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
327 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
328 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
329 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
330 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
331 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
332 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
333 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
334 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
335 /* 5754 and 5787 share the same ASIC ID */
336 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
337 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
338 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
339 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
340
341 { 0, NULL }
342};
343
344#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
345#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
346#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
347#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
348#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
349#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
350#define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
351
352const struct bge_revision * bge_lookup_rev(uint32_t);
353const struct bge_vendor * bge_lookup_vendor(uint16_t);
354
355typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
356
357static int bge_probe(device_t);
358static int bge_attach(device_t);
359static int bge_detach(device_t);
360static int bge_suspend(device_t);
361static int bge_resume(device_t);
362static void bge_release_resources(struct bge_softc *);
363static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
364static int bge_dma_alloc(struct bge_softc *);
365static void bge_dma_free(struct bge_softc *);
366static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
367 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
368
369static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
370static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
371static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
372static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
373static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
374
375static void bge_txeof(struct bge_softc *, uint16_t);
376static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
377static int bge_rxeof(struct bge_softc *, uint16_t, int);
378
379static void bge_asf_driver_up (struct bge_softc *);
380static void bge_tick(void *);
381static void bge_stats_clear_regs(struct bge_softc *);
382static void bge_stats_update(struct bge_softc *);
383static void bge_stats_update_regs(struct bge_softc *);
384static struct mbuf *bge_check_short_dma(struct mbuf *);
385static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
386 uint16_t *, uint16_t *);
387static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
388
389static void bge_intr(void *);
390static int bge_msi_intr(void *);
391static void bge_intr_task(void *, int);
392static void bge_start_locked(struct ifnet *);
393static void bge_start(struct ifnet *);
394static int bge_ioctl(struct ifnet *, u_long, caddr_t);
395static void bge_init_locked(struct bge_softc *);
396static void bge_init(void *);
397static void bge_stop(struct bge_softc *);
398static void bge_watchdog(struct bge_softc *);
399static int bge_shutdown(device_t);
400static int bge_ifmedia_upd_locked(struct ifnet *);
401static int bge_ifmedia_upd(struct ifnet *);
402static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
403
404static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
405static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
406
407static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
408static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
409
410static void bge_setpromisc(struct bge_softc *);
411static void bge_setmulti(struct bge_softc *);
412static void bge_setvlan(struct bge_softc *);
413
414static __inline void bge_rxreuse_std(struct bge_softc *, int);
415static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
416static int bge_newbuf_std(struct bge_softc *, int);
417static int bge_newbuf_jumbo(struct bge_softc *, int);
418static int bge_init_rx_ring_std(struct bge_softc *);
419static void bge_free_rx_ring_std(struct bge_softc *);
420static int bge_init_rx_ring_jumbo(struct bge_softc *);
421static void bge_free_rx_ring_jumbo(struct bge_softc *);
422static void bge_free_tx_ring(struct bge_softc *);
423static int bge_init_tx_ring(struct bge_softc *);
424
425static int bge_chipinit(struct bge_softc *);
426static int bge_blockinit(struct bge_softc *);
427
428static int bge_has_eaddr(struct bge_softc *);
429static uint32_t bge_readmem_ind(struct bge_softc *, int);
430static void bge_writemem_ind(struct bge_softc *, int, int);
431static void bge_writembx(struct bge_softc *, int, int);
432#ifdef notdef
433static uint32_t bge_readreg_ind(struct bge_softc *, int);
434#endif
435static void bge_writemem_direct(struct bge_softc *, int, int);
436static void bge_writereg_ind(struct bge_softc *, int, int);
437
438static int bge_miibus_readreg(device_t, int, int);
439static int bge_miibus_writereg(device_t, int, int, int);
440static void bge_miibus_statchg(device_t);
441#ifdef DEVICE_POLLING
442static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
443#endif
444
445#define BGE_RESET_START 1
446#define BGE_RESET_STOP 2
447static void bge_sig_post_reset(struct bge_softc *, int);
448static void bge_sig_legacy(struct bge_softc *, int);
449static void bge_sig_pre_reset(struct bge_softc *, int);
450static void bge_stop_fw(struct bge_softc *);
451static int bge_reset(struct bge_softc *);
452static void bge_link_upd(struct bge_softc *);
453
454/*
455 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
456 * leak information to untrusted users. It is also known to cause alignment
457 * traps on certain architectures.
458 */
459#ifdef BGE_REGISTER_DEBUG
460static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
461static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
462static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
463#endif
464static void bge_add_sysctls(struct bge_softc *);
465static void bge_add_sysctl_stats_regs(struct bge_softc *,
466 struct sysctl_ctx_list *, struct sysctl_oid_list *);
467static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
468 struct sysctl_oid_list *);
469static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
470
471static device_method_t bge_methods[] = {
472 /* Device interface */
473 DEVMETHOD(device_probe, bge_probe),
474 DEVMETHOD(device_attach, bge_attach),
475 DEVMETHOD(device_detach, bge_detach),
476 DEVMETHOD(device_shutdown, bge_shutdown),
477 DEVMETHOD(device_suspend, bge_suspend),
478 DEVMETHOD(device_resume, bge_resume),
479
480 /* bus interface */
481 DEVMETHOD(bus_print_child, bus_generic_print_child),
482 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
483
484 /* MII interface */
485 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
486 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
487 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
488
489 { 0, 0 }
490};
491
492static driver_t bge_driver = {
493 "bge",
494 bge_methods,
495 sizeof(struct bge_softc)
496};
497
498static devclass_t bge_devclass;
499
500DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
501DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
502
503static int bge_allow_asf = 1;
504
505TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
506
507SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
508SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
509 "Allow ASF mode if available");
510
511#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
512#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
513#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
514#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
515#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
516
517static int
518bge_has_eaddr(struct bge_softc *sc)
519{
520#ifdef __sparc64__
521 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
522 device_t dev;
523 uint32_t subvendor;
524
525 dev = sc->bge_dev;
526
527 /*
528 * The on-board BGEs found in sun4u machines aren't fitted with
529 * an EEPROM which means that we have to obtain the MAC address
530 * via OFW and that some tests will always fail. We distinguish
531 * such BGEs by the subvendor ID, which also has to be obtained
532 * from OFW instead of the PCI configuration space as the latter
533 * indicates Broadcom as the subvendor of the netboot interface.
534 * For early Blade 1500 and 2500 we even have to check the OFW
535 * device path as the subvendor ID always defaults to Broadcom
536 * there.
537 */
538 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
539 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
540 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
541 return (0);
542 memset(buf, 0, sizeof(buf));
543 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
544 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
545 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
546 return (0);
547 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
548 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
549 return (0);
550 }
551#endif
552 return (1);
553}
554
555static uint32_t
556bge_readmem_ind(struct bge_softc *sc, int off)
557{
558 device_t dev;
559 uint32_t val;
560
561 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
562 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
563 return (0);
564
565 dev = sc->bge_dev;
566
567 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
568 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
569 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
570 return (val);
571}
572
573static void
574bge_writemem_ind(struct bge_softc *sc, int off, int val)
575{
576 device_t dev;
577
578 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
579 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
580 return;
581
582 dev = sc->bge_dev;
583
584 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
585 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
586 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
587}
588
589#ifdef notdef
590static uint32_t
591bge_readreg_ind(struct bge_softc *sc, int off)
592{
593 device_t dev;
594
595 dev = sc->bge_dev;
596
597 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
598 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
599}
600#endif
601
602static void
603bge_writereg_ind(struct bge_softc *sc, int off, int val)
604{
605 device_t dev;
606
607 dev = sc->bge_dev;
608
609 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
610 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
611}
612
613static void
614bge_writemem_direct(struct bge_softc *sc, int off, int val)
615{
616 CSR_WRITE_4(sc, off, val);
617}
618
619static void
620bge_writembx(struct bge_softc *sc, int off, int val)
621{
622 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
623 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
624
625 CSR_WRITE_4(sc, off, val);
626}
627
628/*
629 * Map a single buffer address.
630 */
631
632static void
633bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
634{
635 struct bge_dmamap_arg *ctx;
636
637 if (error)
638 return;
639
640 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
641
642 ctx = arg;
643 ctx->bge_busaddr = segs->ds_addr;
644}
645
646static uint8_t
647bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
648{
649 uint32_t access, byte = 0;
650 int i;
651
652 /* Lock. */
653 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
654 for (i = 0; i < 8000; i++) {
655 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
656 break;
657 DELAY(20);
658 }
659 if (i == 8000)
660 return (1);
661
662 /* Enable access. */
663 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
664 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
665
666 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
667 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
668 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
669 DELAY(10);
670 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
671 DELAY(10);
672 break;
673 }
674 }
675
676 if (i == BGE_TIMEOUT * 10) {
677 if_printf(sc->bge_ifp, "nvram read timed out\n");
678 return (1);
679 }
680
681 /* Get result. */
682 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
683
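	/*
	 * The data register holds the 32-bit word with the lowest-addressed
	 * byte in the most significant position; swap it and shift out the
	 * byte selected by addr % 4.
	 */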
684 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
685
686 /* Disable access. */
687 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
688
689 /* Unlock. */
690 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
691 CSR_READ_4(sc, BGE_NVRAM_SWARB);
692
693 return (0);
694}
695
696/*
697 * Read a sequence of bytes from NVRAM.
698 */
699static int
700bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
701{
702 int err = 0, i;
703 uint8_t byte = 0;
704
705 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
706 return (1);
707
708 for (i = 0; i < cnt; i++) {
709 err = bge_nvram_getbyte(sc, off + i, &byte);
710 if (err)
711 break;
712 *(dest + i) = byte;
713 }
714
715 return (err ? 1 : 0);
716}
717
718/*
719 * Read a byte of data stored in the EEPROM at address 'addr.' The
720 * BCM570x supports both the traditional bitbang interface and an
721 * auto access interface for reading the EEPROM. We use the auto
722 * access method.
723 */
724static uint8_t
725bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
726{
727 int i;
728 uint32_t byte = 0;
729
730 /*
731 * Enable use of auto EEPROM access so we can avoid
732 * having to use the bitbang method.
733 */
734 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
735
736 /* Reset the EEPROM, load the clock period. */
737 CSR_WRITE_4(sc, BGE_EE_ADDR,
738 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
739 DELAY(20);
740
741 /* Issue the read EEPROM command. */
742 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
743
744 /* Wait for completion */
745 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
746 DELAY(10);
747 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
748 break;
749 }
750
751 if (i == BGE_TIMEOUT * 10) {
752 device_printf(sc->bge_dev, "EEPROM read timed out\n");
753 return (1);
754 }
755
756 /* Get result. */
757 byte = CSR_READ_4(sc, BGE_EE_DATA);
758
759 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
760
761 return (0);
762}
763
764/*
765 * Read a sequence of bytes from the EEPROM.
766 */
767static int
768bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
769{
770 int i, error = 0;
771 uint8_t byte = 0;
772
773 for (i = 0; i < cnt; i++) {
774 error = bge_eeprom_getbyte(sc, off + i, &byte);
775 if (error)
776 break;
777 *(dest + i) = byte;
778 }
779
780 return (error ? 1 : 0);
781}
782
783static int
784bge_miibus_readreg(device_t dev, int phy, int reg)
785{
786 struct bge_softc *sc;
787 uint32_t val;
788 int i;
789
790 sc = device_get_softc(dev);
791
792 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
793 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
794 CSR_WRITE_4(sc, BGE_MI_MODE,
795 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
796 DELAY(80);
797 }
798
799 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
800 BGE_MIPHY(phy) | BGE_MIREG(reg));
801
802 /* Poll for the PHY register access to complete. */
803 for (i = 0; i < BGE_TIMEOUT; i++) {
804 DELAY(10);
805 val = CSR_READ_4(sc, BGE_MI_COMM);
806 if ((val & BGE_MICOMM_BUSY) == 0) {
807 DELAY(5);
808 val = CSR_READ_4(sc, BGE_MI_COMM);
809 break;
810 }
811 }
812
813 if (i == BGE_TIMEOUT) {
814 device_printf(sc->bge_dev,
815 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
816 phy, reg, val);
817 val = 0;
818 }
819
820 /* Restore the autopoll bit if necessary. */
821 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
822 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
823 DELAY(80);
824 }
825
826 if (val & BGE_MICOMM_READFAIL)
827 return (0);
828
829 return (val & 0xFFFF);
830}
831
832static int
833bge_miibus_writereg(device_t dev, int phy, int reg, int val)
834{
835 struct bge_softc *sc;
836 int i;
837
838 sc = device_get_softc(dev);
839
840 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
841 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
842 return (0);
843
844 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
845 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
846 CSR_WRITE_4(sc, BGE_MI_MODE,
847 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
848 DELAY(80);
849 }
850
851 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
852 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
853
854 for (i = 0; i < BGE_TIMEOUT; i++) {
855 DELAY(10);
856 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
857 DELAY(5);
858 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
859 break;
860 }
861 }
862
863 /* Restore the autopoll bit if necessary. */
864 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
865 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
866 DELAY(80);
867 }
868
869 if (i == BGE_TIMEOUT)
870 device_printf(sc->bge_dev,
871 "PHY write timed out (phy %d, reg %d, val %d)\n",
872 phy, reg, val);
873
874 return (0);
875}
876
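/*
 * MII status change callback: record the link state and program the MAC's
 * port mode, duplex and flow-control settings to match the media the PHY
 * negotiated.
 */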
877static void
878bge_miibus_statchg(device_t dev)
879{
880 struct bge_softc *sc;
881 struct mii_data *mii;
882 sc = device_get_softc(dev);
883 mii = device_get_softc(sc->bge_miibus);
884
885 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
886 (IFM_ACTIVE | IFM_AVALID)) {
887 switch (IFM_SUBTYPE(mii->mii_media_active)) {
888 case IFM_10_T:
889 case IFM_100_TX:
890 sc->bge_link = 1;
891 break;
892 case IFM_1000_T:
893 case IFM_1000_SX:
894 case IFM_2500_SX:
895 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
896 sc->bge_link = 1;
897 else
898 sc->bge_link = 0;
899 break;
900 default:
901 sc->bge_link = 0;
902 break;
903 }
904 } else
905 sc->bge_link = 0;
906 if (sc->bge_link == 0)
907 return;
908 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
909 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
910 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
911 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
912 else
913 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
914
915 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
916 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
917 if ((IFM_OPTIONS(mii->mii_media_active) &
918 IFM_ETH_TXPAUSE) != 0)
919 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
920 else
921 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
922 if ((IFM_OPTIONS(mii->mii_media_active) &
923 IFM_ETH_RXPAUSE) != 0)
924 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
925 else
926 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
927 } else {
928 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
929 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
930 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
931 }
932}
933
934/*
935 * Initialize a standard receive ring descriptor.
936 */
937static int
938bge_newbuf_std(struct bge_softc *sc, int i)
939{
940 struct mbuf *m;
941 struct bge_rx_bd *r;
942 bus_dma_segment_t segs[1];
943 bus_dmamap_t map;
944 int error, nsegs;
945
946 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
947 if (m == NULL)
948 return (ENOBUFS);
949 m->m_len = m->m_pkthdr.len = MCLBYTES;
946 if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
947 (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
948 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
949 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
950 if (m == NULL)
951 return (ENOBUFS);
952 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
953 } else {
954 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
955 if (m == NULL)
956 return (ENOBUFS);
957 m->m_len = m->m_pkthdr.len = MCLBYTES;
958 }
950 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
951 m_adj(m, ETHER_ALIGN);
952
953 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
954 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
955 if (error != 0) {
956 m_freem(m);
957 return (error);
958 }
959 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
960 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
961 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
962 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
963 sc->bge_cdata.bge_rx_std_dmamap[i]);
964 }
965 map = sc->bge_cdata.bge_rx_std_dmamap[i];
966 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
967 sc->bge_cdata.bge_rx_std_sparemap = map;
968 sc->bge_cdata.bge_rx_std_chain[i] = m;
969 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
970 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
971 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
972 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
973 r->bge_flags = BGE_RXBDFLAG_END;
974 r->bge_len = segs[0].ds_len;
975 r->bge_idx = i;
976
977 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
978 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
979
980 return (0);
981}
982
983/*
984 * Initialize a jumbo receive ring descriptor. This allocates
985 * a jumbo buffer from the pool managed internally by the driver.
986 */
987static int
988bge_newbuf_jumbo(struct bge_softc *sc, int i)
989{
990 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
991 bus_dmamap_t map;
992 struct bge_extrx_bd *r;
993 struct mbuf *m;
994 int error, nsegs;
995
996 MGETHDR(m, M_DONTWAIT, MT_DATA);
997 if (m == NULL)
998 return (ENOBUFS);
999
1000 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1001 if (!(m->m_flags & M_EXT)) {
1002 m_freem(m);
1003 return (ENOBUFS);
1004 }
1005 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1006 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1007 m_adj(m, ETHER_ALIGN);
1008
1009 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1010 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1011 if (error != 0) {
1012 m_freem(m);
1013 return (error);
1014 }
1015
1016 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1017 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1018 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1019 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1020 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1021 }
1022 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1023 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1024 sc->bge_cdata.bge_rx_jumbo_sparemap;
1025 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1026 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1027 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1028 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1029 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1030 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1031
1032 /*
1033 * Fill in the extended RX buffer descriptor.
1034 */
1035 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1036 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1037 r->bge_idx = i;
1038 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
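	/*
	 * The cases below intentionally fall through so that an N-segment
	 * mapping fills descriptor slots N-1 down to 0.
	 */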
1039 switch (nsegs) {
1040 case 4:
1041 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1042 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1043 r->bge_len3 = segs[3].ds_len;
1044 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1045 case 3:
1046 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1047 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1048 r->bge_len2 = segs[2].ds_len;
1049 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1050 case 2:
1051 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1052 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1053 r->bge_len1 = segs[1].ds_len;
1054 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1055 case 1:
1056 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1057 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1058 r->bge_len0 = segs[0].ds_len;
1059 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1060 break;
1061 default:
1062 panic("%s: %d segments\n", __func__, nsegs);
1063 }
1064
1065 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1066 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1067
1068 return (0);
1069}
1070
1071static int
1072bge_init_rx_ring_std(struct bge_softc *sc)
1073{
1074 int error, i;
1075
1076 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1077 sc->bge_std = 0;
1078 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1079 if ((error = bge_newbuf_std(sc, i)) != 0)
1080 return (error);
1081 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1082 }
1083
1084 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1085 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1086
1087 sc->bge_std = 0;
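	/* The whole ring has been filled; advertise the initial producer index. */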
1088 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1089
1090 return (0);
1091}
1092
1093static void
1094bge_free_rx_ring_std(struct bge_softc *sc)
1095{
1096 int i;
1097
1098 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1099 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1100 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1101 sc->bge_cdata.bge_rx_std_dmamap[i],
1102 BUS_DMASYNC_POSTREAD);
1103 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1104 sc->bge_cdata.bge_rx_std_dmamap[i]);
1105 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1106 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1107 }
1108 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1109 sizeof(struct bge_rx_bd));
1110 }
1111}
1112
1113static int
1114bge_init_rx_ring_jumbo(struct bge_softc *sc)
1115{
1116 struct bge_rcb *rcb;
1117 int error, i;
1118
1119 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1120 sc->bge_jumbo = 0;
1121 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1122 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1123 return (error);
1124 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1125 }
1126
1127 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1128 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1129
1130 sc->bge_jumbo = 0;
1131
1132 /* Enable the jumbo receive producer ring. */
1133 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1134 rcb->bge_maxlen_flags =
1135 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1136 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1137
1138 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1139
1140 return (0);
1141}
1142
1143static void
1144bge_free_rx_ring_jumbo(struct bge_softc *sc)
1145{
1146 int i;
1147
1148 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1149 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1150 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1151 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1152 BUS_DMASYNC_POSTREAD);
1153 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1154 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1155 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1156 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1157 }
1158 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1159 sizeof(struct bge_extrx_bd));
1160 }
1161}
1162
1163static void
1164bge_free_tx_ring(struct bge_softc *sc)
1165{
1166 int i;
1167
1168 if (sc->bge_ldata.bge_tx_ring == NULL)
1169 return;
1170
1171 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1172 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1173 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1174 sc->bge_cdata.bge_tx_dmamap[i],
1175 BUS_DMASYNC_POSTWRITE);
1176 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1177 sc->bge_cdata.bge_tx_dmamap[i]);
1178 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1179 sc->bge_cdata.bge_tx_chain[i] = NULL;
1180 }
1181 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1182 sizeof(struct bge_tx_bd));
1183 }
1184}
1185
1186static int
1187bge_init_tx_ring(struct bge_softc *sc)
1188{
1189 sc->bge_txcnt = 0;
1190 sc->bge_tx_saved_considx = 0;
1191
1192 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1193 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1194 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1195
1196 /* Initialize transmit producer index for host-memory send ring. */
1197 sc->bge_tx_prodidx = 0;
1198 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1199
1200 /* 5700 b2 errata */
1201 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1202 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1203
1204 /* NIC-memory send ring not used; initialize to zero. */
1205 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1206 /* 5700 b2 errata */
1207 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1208 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1209
1210 return (0);
1211}
1212
1213static void
1214bge_setpromisc(struct bge_softc *sc)
1215{
1216 struct ifnet *ifp;
1217
1218 BGE_LOCK_ASSERT(sc);
1219
1220 ifp = sc->bge_ifp;
1221
1222 /* Enable or disable promiscuous mode as needed. */
1223 if (ifp->if_flags & IFF_PROMISC)
1224 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1225 else
1226 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1227}
1228
1229static void
1230bge_setmulti(struct bge_softc *sc)
1231{
1232 struct ifnet *ifp;
1233 struct ifmultiaddr *ifma;
1234 uint32_t hashes[4] = { 0, 0, 0, 0 };
1235 int h, i;
1236
1237 BGE_LOCK_ASSERT(sc);
1238
1239 ifp = sc->bge_ifp;
1240
1241 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1242 for (i = 0; i < 4; i++)
1243 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1244 return;
1245 }
1246
1247 /* First, zot all the existing filters. */
1248 for (i = 0; i < 4; i++)
1249 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1250
1251 /* Now program new ones. */
1252 if_maddr_rlock(ifp);
1253 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1254 if (ifma->ifma_addr->sa_family != AF_LINK)
1255 continue;
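		/*
		 * Hash on the low 7 bits of the little-endian CRC: bits 6-5
		 * select one of the four MAR registers, bits 4-0 the bit
		 * within it.
		 */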
1256 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1257 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1258 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1259 }
1260 if_maddr_runlock(ifp);
1261
1262 for (i = 0; i < 4; i++)
1263 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1264}
1265
1266static void
1267bge_setvlan(struct bge_softc *sc)
1268{
1269 struct ifnet *ifp;
1270
1271 BGE_LOCK_ASSERT(sc);
1272
1273 ifp = sc->bge_ifp;
1274
1275 /* Enable or disable VLAN tag stripping as needed. */
1276 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1277 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1278 else
1279 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1280}
1281
1282static void
1283bge_sig_pre_reset(struct bge_softc *sc, int type)
1284{
1285
1286 /*
1287 * Some chips don't like this, so only do it if ASF is enabled.
1288 */
1289 if (sc->bge_asf_mode)
1290 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1291
1292 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1293 switch (type) {
1294 case BGE_RESET_START:
1295 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1296 break;
1297 case BGE_RESET_STOP:
1298 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1299 break;
1300 }
1301 }
1302}
1303
1304static void
1305bge_sig_post_reset(struct bge_softc *sc, int type)
1306{
1307
1308 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1309 switch (type) {
1310 case BGE_RESET_START:
1311 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1312 /* START DONE */
1313 break;
1314 case BGE_RESET_STOP:
1315 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1316 break;
1317 }
1318 }
1319}
1320
1321static void
1322bge_sig_legacy(struct bge_softc *sc, int type)
1323{
1324
1325 if (sc->bge_asf_mode) {
1326 switch (type) {
1327 case BGE_RESET_START:
1328 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1329 break;
1330 case BGE_RESET_STOP:
1331 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1332 break;
1333 }
1334 }
1335}
1336
1337static void
1338bge_stop_fw(struct bge_softc *sc)
1339{
1340 int i;
1341
1342 if (sc->bge_asf_mode) {
1343 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1344 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1345 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1346
1347 for (i = 0; i < 100; i++) {
1348 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1349 break;
1350 DELAY(10);
1351 }
1352 }
1353}
1354
1355/*
1356 * Do endian, PCI and DMA initialization.
1357 */
1358static int
1359bge_chipinit(struct bge_softc *sc)
1360{
1361 uint32_t dma_rw_ctl, misc_ctl;
1362 uint16_t val;
1363 int i;
1364
1365 /* Set endianness before we access any non-PCI registers. */
1366 misc_ctl = BGE_INIT;
1367 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1368 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1369 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1370
1371 /* Clear the MAC control register */
1372 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1373
1374 /*
1375 * Clear the MAC statistics block in the NIC's
1376 * internal memory.
1377 */
1378 for (i = BGE_STATS_BLOCK;
1379 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1380 BGE_MEMWIN_WRITE(sc, i, 0);
1381
1382 for (i = BGE_STATUS_BLOCK;
1383 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1384 BGE_MEMWIN_WRITE(sc, i, 0);
1385
1386 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1387 /*
1388 * Fix data corruption caused by non-qword write with WB.
1389 * Fix master abort in PCI mode.
1390 * Fix PCI latency timer.
1391 */
1392 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1393 val |= (1 << 10) | (1 << 12) | (1 << 13);
1394 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1395 }
1396
1397 /*
1398 * Set up the PCI DMA control register.
1399 */
1400 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1401 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1402 if (sc->bge_flags & BGE_FLAG_PCIE) {
1403 /* Read watermark not used, 128 bytes for write. */
1404 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1405 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1406 if (BGE_IS_5714_FAMILY(sc)) {
1407 /* 256 bytes for read and write. */
1408 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1409 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1410 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1411 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1412 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1413 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1414 /*
1415 * In the BCM5703, the DMA read watermark should
1416 * be set to less than or equal to the maximum
1417 * memory read byte count of the PCI-X command
1418 * register.
1419 */
1420 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1421 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1422 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1423 /* 1536 bytes for read, 384 bytes for write. */
1424 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1425 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1426 } else {
1427 /* 384 bytes for read and write. */
1428 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1429 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1430 0x0F;
1431 }
1432 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1433 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1434 uint32_t tmp;
1435
1436 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1437 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1438 if (tmp == 6 || tmp == 7)
1439 dma_rw_ctl |=
1440 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1441
1442 /* Set PCI-X DMA write workaround. */
1443 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1444 }
1445 } else {
1446 /* Conventional PCI bus: 256 bytes for read and write. */
1447 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1448 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1449
1450 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1451 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1452 dma_rw_ctl |= 0x0F;
1453 }
1454 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1455 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1456 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1457 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1458 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1459 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1460 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1461 if (BGE_IS_5717_PLUS(sc))
1462 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1463 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1464
1465 /*
1466 * Set up general mode register.
1467 */
1468 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1469 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1470 BGE_MODECTL_TX_NO_PHDR_CSUM);
1471
1472 /*
1473 * BCM5701 B5 has a bug causing data corruption when using
1474 * 64-bit DMA reads, which can be terminated early and then
1475 * completed later as 32-bit accesses, in combination with
1476 * certain bridges.
1477 */
1478 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1479 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1480 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1481
1482 /*
1483 * Tell the firmware the driver is running
1484 */
1485 if (sc->bge_asf_mode & ASF_STACKUP)
1486 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1487
1488 /*
1489 * Disable memory write invalidate. Apparently it is not supported
1490 * properly by these devices. Also ensure that INTx isn't disabled,
1491 * as these chips need it even when using MSI.
1492 */
1493 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1494 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1495
1496 /* Set the timer prescaler (always 66MHz) */
1497 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1498
1499 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1500 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1501 DELAY(40); /* XXX */
1502
1503 /* Put PHY into ready state */
1504 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1505 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1506 DELAY(40);
1507 }
1508
1509 return (0);
1510}
1511
1512static int
1513bge_blockinit(struct bge_softc *sc)
1514{
1515 struct bge_rcb *rcb;
1516 bus_size_t vrcb;
1517 bge_hostaddr taddr;
1518 uint32_t val;
1519 int i, limit;
1520
1521 /*
1522 * Initialize the memory window pointer register so that
1523 * we can access the first 32K of internal NIC RAM. This will
1524 * allow us to set up the TX send ring RCBs and the RX return
1525 * ring RCBs, plus other things which live in NIC memory.
1526 */
1527 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1528
1529 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1530
1531 if (!(BGE_IS_5705_PLUS(sc))) {
1532 /* Configure mbuf memory pool */
1533 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1534 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1535 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1536 else
1537 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1538
1539 /* Configure DMA resource pool */
1540 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1541 BGE_DMA_DESCRIPTORS);
1542 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1543 }
1544
1545 /* Configure mbuf pool watermarks */
1546 if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
1547 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1548 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1549 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1550 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1551 } else {
1552 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1553 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1554 }
1555 } else if (!BGE_IS_5705_PLUS(sc)) {
1556 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1557 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1558 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1559 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1560 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1561 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1562 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1563 } else {
1564 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1565 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1566 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1567 }
1568
1569 /* Configure DMA resource watermarks */
1570 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1571 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1572
1573 /* Enable buffer manager */
1574 if (!(BGE_IS_5705_PLUS(sc))) {
1575 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1576 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1577
1578 /* Poll for buffer manager start indication */
1579 for (i = 0; i < BGE_TIMEOUT; i++) {
1580 DELAY(10);
1581 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1582 break;
1583 }
1584
1585 if (i == BGE_TIMEOUT) {
1586 device_printf(sc->bge_dev,
1587 "buffer manager failed to start\n");
1588 return (ENXIO);
1589 }
1590 }
1591
1592 /* Enable flow-through queues */
1593 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1594 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1595
1596 /* Wait until queue initialization is complete */
1597 for (i = 0; i < BGE_TIMEOUT; i++) {
1598 DELAY(10);
1599 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1600 break;
1601 }
1602
1603 if (i == BGE_TIMEOUT) {
1604 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1605 return (ENXIO);
1606 }
1607
1608 /*
1609 * Summary of rings supported by the controller:
1610 *
1611 * Standard Receive Producer Ring
1612 * - This ring is used to feed receive buffers for "standard"
1613 * sized frames (typically 1536 bytes) to the controller.
1614 *
1615 * Jumbo Receive Producer Ring
1616 * - This ring is used to feed receive buffers for jumbo sized
1617 * frames (i.e. anything bigger than the "standard" frames)
1618 * to the controller.
1619 *
1620 * Mini Receive Producer Ring
1621 * - This ring is used to feed receive buffers for "mini"
1622 * sized frames to the controller.
1623 * - This feature required external memory for the controller
1624 * but was never used in a production system. Should always
1625 * be disabled.
1626 *
1627 * Receive Return Ring
1628 * - After the controller has placed an incoming frame into a
1629 * receive buffer, that buffer is moved into a receive return
1630 * ring. The driver is then responsible for passing the
1631 * buffer up to the stack. Many versions of the controller
1632 * support multiple RR rings.
1633 *
1634 * Send Ring
1635 * - This ring is used for outgoing frames. Many versions of
1636 * the controller support multiple send rings.
1637 */
1638
1639 /* Initialize the standard receive producer ring control block. */
1640 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1641 rcb->bge_hostaddr.bge_addr_lo =
1642 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1643 rcb->bge_hostaddr.bge_addr_hi =
1644 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1645 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1646 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1647 if (BGE_IS_5717_PLUS(sc)) {
1648 /*
1649 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1650 * Bits 15-2 : Maximum RX frame size
1651 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1652 * Bit 0 : Reserved
1653 */
1654 rcb->bge_maxlen_flags =
1655 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1656 } else if (BGE_IS_5705_PLUS(sc)) {
1657 /*
1658 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1659 * Bits 15-2 : Reserved (should be 0)
1660 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1661 * Bit 0 : Reserved
1662 */
1663 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1664 } else {
1665 /*
1666 * Ring size is always XXX entries
1667 * Bits 31-16: Maximum RX frame size
1668 * Bits 15-2 : Reserved (should be 0)
1669 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1670 * Bit 0 : Reserved
1671 */
1672 rcb->bge_maxlen_flags =
1673 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1674 }
1675 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1676 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1677 else
1678 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1679 /* Write the standard receive producer ring control block. */
1680 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1681 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1682 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1683 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1684
1685 /* Reset the standard receive producer ring producer index. */
1686 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1687
1688 /*
1689 * Initialize the jumbo RX producer ring control
1690 * block. We set the 'ring disabled' bit in the
1691 * flags field until we're actually ready to start
1692 * using this ring (i.e. once we set the MTU
1693 * high enough to require it).
1694 */
1695 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1696 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1697 /* Get the jumbo receive producer ring RCB parameters. */
1698 rcb->bge_hostaddr.bge_addr_lo =
1699 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1700 rcb->bge_hostaddr.bge_addr_hi =
1701 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1702 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1703 sc->bge_cdata.bge_rx_jumbo_ring_map,
1704 BUS_DMASYNC_PREREAD);
1705 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1706 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1707 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1708 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1709 else
1710 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1711 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1712 rcb->bge_hostaddr.bge_addr_hi);
1713 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1714 rcb->bge_hostaddr.bge_addr_lo);
1715 /* Program the jumbo receive producer ring RCB parameters. */
1716 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1717 rcb->bge_maxlen_flags);
1718 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1719 /* Reset the jumbo receive producer ring producer index. */
1720 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1721 }
1722
1723 /* Disable the mini receive producer ring RCB. */
1724 if (BGE_IS_5700_FAMILY(sc)) {
1725 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1726 rcb->bge_maxlen_flags =
1727 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1728 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1729 rcb->bge_maxlen_flags);
1730 /* Reset the mini receive producer ring producer index. */
1731 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1732 }
1733
1734 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1735 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1736 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1737 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1738 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1739 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1740 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1741 }
1742 /*
1743 * The BD ring replenish thresholds control how often the
1744 * hardware fetches new BDs from the producer rings in host
1745 * memory. Setting the value too low on a busy system can
1746 * starve the hardware and reduce the throughput.
1747 *
1748 * Set the BD ring replenish thresholds. The recommended
1749 * values are 1/8th the number of descriptors allocated to
1750 * each ring.
1751 * XXX The 5754 requires a lower threshold, so it might be a
1752 * requirement of all 575x family chips. The Linux driver sets
1753 * the lower threshold for all 5705 family chips as well, but there
1754 * are reports that it might not need to be so strict.
1755 *
1756 * XXX Linux does some extra fiddling here for the 5906 parts as
1757 * well.
1758 */
1759 if (BGE_IS_5705_PLUS(sc))
1760 val = 8;
1761 else
1762 val = BGE_STD_RX_RING_CNT / 8;
1763 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1764 if (BGE_IS_JUMBO_CAPABLE(sc))
1765 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1766 BGE_JUMBO_RX_RING_CNT/8);
1767 if (BGE_IS_5717_PLUS(sc)) {
1768 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1769 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1770 }
1771
1772 /*
1773 * Disable all send rings by setting the 'ring disabled' bit
1774 * in the flags field of all the TX send ring control blocks,
1775 * located in NIC memory.
1776 */
1777 if (!BGE_IS_5705_PLUS(sc))
1778 /* 5700 to 5704 had 16 send rings. */
1779 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1780 else
1781 limit = 1;
1782 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1783 for (i = 0; i < limit; i++) {
1784 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1785 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1786 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1787 vrcb += sizeof(struct bge_rcb);
1788 }
1789
1790 /* Configure send ring RCB 0 (we use only the first ring) */
1791 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1792 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1793 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1794 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1795 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1796 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1797 else
1798 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1799 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1800 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1801 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1802
1803 /*
1804 * Disable all receive return rings by setting the
1805 * 'ring disabled' bit in the flags field of all the receive
1806 * return ring control blocks, located in NIC memory.
1807 */
1808 if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
1809 /* Should be 17, use 16 until we get an SRAM map. */
1810 limit = 16;
1811 } else if (!BGE_IS_5705_PLUS(sc))
1812 limit = BGE_RX_RINGS_MAX;
1813 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
1814 limit = 4;
1815 else
1816 limit = 1;
1817 /* Disable all receive return rings. */
1818 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1819 for (i = 0; i < limit; i++) {
1820 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1821 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1822 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1823 BGE_RCB_FLAG_RING_DISABLED);
1824 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1825 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1826 (i * (sizeof(uint64_t))), 0);
1827 vrcb += sizeof(struct bge_rcb);
1828 }
1829
1830 /*
1831 * Set up receive return ring 0. Note that the NIC address
1832 * for RX return rings is 0x0. The return rings live entirely
1833 * within the host, so the nicaddr field in the RCB isn't used.
1834 */
1835 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1836 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1837 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1838 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1839 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1840 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1841 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1842
1843 /* Set random backoff seed for TX */
1844 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1845 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1846 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1847 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1848 BGE_TX_BACKOFF_SEED_MASK);
1849
1850 /* Set inter-packet gap */
1851 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1852
1853 /*
1854 * Specify which ring to use for packets that don't match
1855 * any RX rules.
1856 */
1857 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1858
1859 /*
1860 * Configure number of RX lists. One interrupt distribution
1861 * list, sixteen active lists, one bad frames class.
1862 */
1863 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1864
1865 /* Initialize RX list placement stats mask. */
1866 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1867 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1868
1869 /* Disable host coalescing until we get it set up */
1870 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1871
1872 /* Poll to make sure it's shut down. */
1873 for (i = 0; i < BGE_TIMEOUT; i++) {
1874 DELAY(10);
1875 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1876 break;
1877 }
1878
1879 if (i == BGE_TIMEOUT) {
1880 device_printf(sc->bge_dev,
1881 "host coalescing engine failed to idle\n");
1882 return (ENXIO);
1883 }
1884
1885 /* Set up host coalescing defaults */
1886 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1887 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1888 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1889 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1890 if (!(BGE_IS_5705_PLUS(sc))) {
1891 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1892 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1893 }
1894 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1895 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1896
1897 /* Set up address of statistics block */
1898 if (!(BGE_IS_5705_PLUS(sc))) {
1899 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1900 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1901 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1902 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1903 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1904 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1905 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1906 }
1907
1908 /* Set up address of status block */
1909 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1910 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1911 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1912 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1913
1914 /* Set up status block size. */
1915 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1916 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1917 val = BGE_STATBLKSZ_FULL;
1918 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1919 } else {
1920 val = BGE_STATBLKSZ_32BYTE;
1921 bzero(sc->bge_ldata.bge_status_block, 32);
1922 }
1923 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1924 sc->bge_cdata.bge_status_map,
1925 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1926
1927 /* Turn on host coalescing state machine */
1928 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1929
1930 /* Turn on RX BD completion state machine and enable attentions */
1931 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1932 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1933
1934 /* Turn on RX list placement state machine */
1935 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1936
1937 /* Turn on RX list selector state machine. */
1938 if (!(BGE_IS_5705_PLUS(sc)))
1939 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1940
1941 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1942 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1943 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1944 BGE_MACMODE_FRMHDR_DMA_ENB;
1945
1946 if (sc->bge_flags & BGE_FLAG_TBI)
1947 val |= BGE_PORTMODE_TBI;
1948 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1949 val |= BGE_PORTMODE_GMII;
1950 else
1951 val |= BGE_PORTMODE_MII;
1952
1953 /* Turn on DMA, clear stats */
1954 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1955
1956 /* Set misc. local control, enable interrupts on attentions */
1957 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1958
1959#ifdef notdef
1960 /* Assert GPIO pins for PHY reset */
1961 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1962 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1963 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1964 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1965#endif
1966
1967 /* Turn on DMA completion state machine */
1968 if (!(BGE_IS_5705_PLUS(sc)))
1969 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1970
1971 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1972
1973 /* Enable host coalescing bug fix. */
1974 if (BGE_IS_5755_PLUS(sc))
1975 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1976
1977 /* Request larger DMA burst size to get better performance. */
1978 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
1979 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1980
1981 /* Turn on write DMA state machine */
1982 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1983 DELAY(40);
1984
1985 /* Turn on read DMA state machine */
1986 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1987
1988 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1989 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
1990
1991 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1992 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1993 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1994 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1995 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1996 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1997 if (sc->bge_flags & BGE_FLAG_PCIE)
1998 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1999 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2000 val |= BGE_RDMAMODE_TSO4_ENABLE;
2001 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2002 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2003 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2004 val |= BGE_RDMAMODE_TSO6_ENABLE;
2005 }
2006 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2007 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2008 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2009 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2010 BGE_IS_5717_PLUS(sc)) {
2011 /*
2012 * Enable fix for read DMA FIFO overruns.
2013 * The fix is to limit the number of RX BDs
2014 * the hardware would fetch at a time.
2015 */
2016 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
2017 CSR_READ_4(sc, BGE_RDMA_RSRVCTRL) |
2018 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2019 }
2020 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2021 DELAY(40);
2022
2023 /* Turn on RX data completion state machine */
2024 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2025
2026 /* Turn on RX BD initiator state machine */
2027 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2028
2029 /* Turn on RX data and RX BD initiator state machine */
2030 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2031
2032 /* Turn on Mbuf cluster free state machine */
2033 if (!(BGE_IS_5705_PLUS(sc)))
2034 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2035
2036 /* Turn on send BD completion state machine */
2037 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2038
2039 /* Turn on send data completion state machine */
2040 val = BGE_SDCMODE_ENABLE;
2041 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2042 val |= BGE_SDCMODE_CDELAY;
2043 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2044
2045 /* Turn on send data initiator state machine */
2046 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2047 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2048 BGE_SDIMODE_HW_LSO_PRE_DMA);
2049 else
2050 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2051
2052 /* Turn on send BD initiator state machine */
2053 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2054
2055 /* Turn on send BD selector state machine */
2056 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2057
2058 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2059 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2060 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2061
2062 /* ack/clear link change events */
2063 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2064 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2065 BGE_MACSTAT_LINK_CHANGED);
2066 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2067
2068 /*
2069 * Enable attention when the link has changed state for
2070 * devices that use auto polling.
2071 */
2072 if (sc->bge_flags & BGE_FLAG_TBI) {
2073 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2074 } else {
2075 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2076 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2077 DELAY(80);
2078 }
2079 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2080 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2081 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2082 BGE_EVTENB_MI_INTERRUPT);
2083 }
2084
2085 /*
2086 * Clear any pending link state attention.
2087 * Otherwise some link state change events may be lost until attention
2088 * is cleared by bge_intr() -> bge_link_upd() sequence.
2089 * It's not necessary on newer BCM chips - perhaps enabling link
2090 * state change attentions implies clearing pending attention.
2091 */
2092 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2093 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2094 BGE_MACSTAT_LINK_CHANGED);
2095
2096 /* Enable link state change attentions. */
2097 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2098
2099 return (0);
2100}
2101
2102const struct bge_revision *
2103bge_lookup_rev(uint32_t chipid)
2104{
2105 const struct bge_revision *br;
2106
2107 for (br = bge_revisions; br->br_name != NULL; br++) {
2108 if (br->br_chipid == chipid)
2109 return (br);
2110 }
2111
2112 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2113 if (br->br_chipid == BGE_ASICREV(chipid))
2114 return (br);
2115 }
2116
2117 return (NULL);
2118}
2119
2120const struct bge_vendor *
2121bge_lookup_vendor(uint16_t vid)
2122{
2123 const struct bge_vendor *v;
2124
2125 for (v = bge_vendors; v->v_name != NULL; v++)
2126 if (v->v_id == vid)
2127 return (v);
2128
2129 panic("%s: unknown vendor %d", __func__, vid);
2130 return (NULL);
2131}
2132
2133/*
2134 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2135 * against our list and return its name if we find a match.
2136 *
2137 * Note that since the Broadcom controller contains VPD support, we
2138 * try to get the device name string from the controller itself instead
2139 * of the compiled-in string. It guarantees we'll always announce the
2140 * right product name. We fall back to the compiled-in string when
2141 * VPD is unavailable or corrupt.
2142 */
2143static int
2144bge_probe(device_t dev)
2145{
2146 char buf[96];
2147 char model[64];
2148 const struct bge_revision *br;
2149 const char *pname;
2150 struct bge_softc *sc = device_get_softc(dev);
2151 const struct bge_type *t = bge_devs;
2152 const struct bge_vendor *v;
2153 uint32_t id;
2154 uint16_t did, vid;
2155
2156 sc->bge_dev = dev;
2157 vid = pci_get_vendor(dev);
2158 did = pci_get_device(dev);
2159 while (t->bge_vid != 0) {
2160 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2161 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2162 BGE_PCIMISCCTL_ASICREV_SHIFT;
2163 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2164 /*
2165 * Find the ASIC revision. Different chips
2166 * use different registers.
2167 */
2168 switch (pci_get_device(dev)) {
2169 case BCOM_DEVICEID_BCM5717:
2170 case BCOM_DEVICEID_BCM5718:
2171 id = pci_read_config(dev,
2172 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2173 break;
2174 default:
2175 id = pci_read_config(dev,
2176 BGE_PCI_PRODID_ASICREV, 4);
2177 }
2178 }
2179 br = bge_lookup_rev(id);
2180 v = bge_lookup_vendor(vid);
2181 if (bge_has_eaddr(sc) &&
2182 pci_get_vpd_ident(dev, &pname) == 0)
2183 snprintf(model, 64, "%s", pname);
2184 else
2185 snprintf(model, 64, "%s %s", v->v_name,
2186 br != NULL ? br->br_name :
2187 "NetXtreme Ethernet Controller");
2188 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2189 br != NULL ? "" : "unknown ", id);
2190 device_set_desc_copy(dev, buf);
2191 return (0);
2192 }
2193 t++;
2194 }
2195
2196 return (ENXIO);
2197}
2198
2199static void
2200bge_dma_free(struct bge_softc *sc)
2201{
2202 int i;
2203
2204 /* Destroy DMA maps for RX buffers. */
2205 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2206 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2207 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2208 sc->bge_cdata.bge_rx_std_dmamap[i]);
2209 }
2210 if (sc->bge_cdata.bge_rx_std_sparemap)
2211 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2212 sc->bge_cdata.bge_rx_std_sparemap);
2213
2214 /* Destroy DMA maps for jumbo RX buffers. */
2215 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2216 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2217 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2218 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2219 }
2220 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2221 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2222 sc->bge_cdata.bge_rx_jumbo_sparemap);
2223
2224 /* Destroy DMA maps for TX buffers. */
2225 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2226 if (sc->bge_cdata.bge_tx_dmamap[i])
2227 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2228 sc->bge_cdata.bge_tx_dmamap[i]);
2229 }
2230
2231 if (sc->bge_cdata.bge_rx_mtag)
2232 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2233 if (sc->bge_cdata.bge_tx_mtag)
2234 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2235
2236
2237 /* Destroy standard RX ring. */
2238 if (sc->bge_cdata.bge_rx_std_ring_map)
2239 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2240 sc->bge_cdata.bge_rx_std_ring_map);
2241 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2242 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2243 sc->bge_ldata.bge_rx_std_ring,
2244 sc->bge_cdata.bge_rx_std_ring_map);
2245
2246 if (sc->bge_cdata.bge_rx_std_ring_tag)
2247 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2248
2249 /* Destroy jumbo RX ring. */
2250 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2251 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2252 sc->bge_cdata.bge_rx_jumbo_ring_map);
2253
2254 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2255 sc->bge_ldata.bge_rx_jumbo_ring)
2256 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2257 sc->bge_ldata.bge_rx_jumbo_ring,
2258 sc->bge_cdata.bge_rx_jumbo_ring_map);
2259
2260 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2261 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2262
2263 /* Destroy RX return ring. */
2264 if (sc->bge_cdata.bge_rx_return_ring_map)
2265 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2266 sc->bge_cdata.bge_rx_return_ring_map);
2267
2268 if (sc->bge_cdata.bge_rx_return_ring_map &&
2269 sc->bge_ldata.bge_rx_return_ring)
2270 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2271 sc->bge_ldata.bge_rx_return_ring,
2272 sc->bge_cdata.bge_rx_return_ring_map);
2273
2274 if (sc->bge_cdata.bge_rx_return_ring_tag)
2275 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2276
2277 /* Destroy TX ring. */
2278 if (sc->bge_cdata.bge_tx_ring_map)
2279 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2280 sc->bge_cdata.bge_tx_ring_map);
2281
2282 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2283 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2284 sc->bge_ldata.bge_tx_ring,
2285 sc->bge_cdata.bge_tx_ring_map);
2286
2287 if (sc->bge_cdata.bge_tx_ring_tag)
2288 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2289
2290 /* Destroy status block. */
2291 if (sc->bge_cdata.bge_status_map)
2292 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2293 sc->bge_cdata.bge_status_map);
2294
2295 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2296 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2297 sc->bge_ldata.bge_status_block,
2298 sc->bge_cdata.bge_status_map);
2299
2300 if (sc->bge_cdata.bge_status_tag)
2301 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2302
2303 /* Destroy statistics block. */
2304 if (sc->bge_cdata.bge_stats_map)
2305 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2306 sc->bge_cdata.bge_stats_map);
2307
2308 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2309 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2310 sc->bge_ldata.bge_stats,
2311 sc->bge_cdata.bge_stats_map);
2312
2313 if (sc->bge_cdata.bge_stats_tag)
2314 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2315
2316 if (sc->bge_cdata.bge_buffer_tag)
2317 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2318
2319 /* Destroy the parent tag. */
2320 if (sc->bge_cdata.bge_parent_tag)
2321 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2322}
2323
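/*
 * Allocate and load coherent DMA memory for a descriptor ring.  If the
 * resulting ring crosses a 4GB boundary on a chip with the 4G boundary
 * bug, release it and retry with the DMA address space restricted to
 * 32 bits.
 */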
2324static int
2325bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2326 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2327 bus_addr_t *paddr, const char *msg)
2328{
2329 struct bge_dmamap_arg ctx;
2330 bus_addr_t lowaddr;
2331 bus_size_t ring_end;
2332 int error;
2333
2334 lowaddr = BUS_SPACE_MAXADDR;
2335again:
2336 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2337 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2338 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2339 if (error != 0) {
2340 device_printf(sc->bge_dev,
2341 "could not create %s dma tag\n", msg);
2342 return (ENOMEM);
2343 }
2344 /* Allocate DMA'able memory for ring. */
2345 error = bus_dmamem_alloc(*tag, (void **)ring,
2346 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2347 if (error != 0) {
2348 device_printf(sc->bge_dev,
2349 "could not allocate DMA'able memory for %s\n", msg);
2350 return (ENOMEM);
2351 }
2352 /* Load the address of the ring. */
2353 ctx.bge_busaddr = 0;
2354 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2355 &ctx, BUS_DMA_NOWAIT);
2356 if (error != 0) {
2357 device_printf(sc->bge_dev,
2358 "could not load DMA'able memory for %s\n", msg);
2359 return (ENOMEM);
2360 }
2361 *paddr = ctx.bge_busaddr;
2362 ring_end = *paddr + maxsize;
2363 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2364 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2365 /*
2366 * 4GB boundary crossed. Limit maximum allowable DMA
2367 * address space to 32bit and try again.
2368 */
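		/*
		 * A concrete sketch of the check above, assuming
		 * BGE_ADDR_HI() yields the upper 32 bits of the address:
		 * an 8KB ring loaded at 0xFFFFF000 ends at 0x1_00001000,
		 * so the start and end have different upper 32 bits, the
		 * ring straddles the 4GB line, and the retry is taken.
		 */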
2369 bus_dmamap_unload(*tag, *map);
2370 bus_dmamem_free(*tag, *ring, *map);
2371 bus_dma_tag_destroy(*tag);
2372 if (bootverbose)
2373 device_printf(sc->bge_dev, "4GB boundary crossed, "
2374 "limit DMA address space to 32bit for %s\n", msg);
2375 *ring = NULL;
2376 *tag = NULL;
2377 *map = NULL;
2378 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2379 goto again;
2380 }
2381 return (0);
2382}
2383
2384static int
2385bge_dma_alloc(struct bge_softc *sc)
2386{
2387 bus_addr_t lowaddr;
959 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
960 m_adj(m, ETHER_ALIGN);
961
962 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
963 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
964 if (error != 0) {
965 m_freem(m);
966 return (error);
967 }
968 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
969 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
970 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
971 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
972 sc->bge_cdata.bge_rx_std_dmamap[i]);
973 }
974 map = sc->bge_cdata.bge_rx_std_dmamap[i];
975 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
976 sc->bge_cdata.bge_rx_std_sparemap = map;
977 sc->bge_cdata.bge_rx_std_chain[i] = m;
978 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
979 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
980 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
981 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
982 r->bge_flags = BGE_RXBDFLAG_END;
983 r->bge_len = segs[0].ds_len;
984 r->bge_idx = i;
985
986 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
987 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
988
989 return (0);
990}
991
992/*
993 * Initialize a jumbo receive ring descriptor. This allocates
994 * a jumbo (9KB) mbuf cluster to back the buffer.
995 */
996static int
997bge_newbuf_jumbo(struct bge_softc *sc, int i)
998{
999 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1000 bus_dmamap_t map;
1001 struct bge_extrx_bd *r;
1002 struct mbuf *m;
1003 int error, nsegs;
1004
1005 MGETHDR(m, M_DONTWAIT, MT_DATA);
1006 if (m == NULL)
1007 return (ENOBUFS);
1008
1009 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1010 if (!(m->m_flags & M_EXT)) {
1011 m_freem(m);
1012 return (ENOBUFS);
1013 }
1014 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1015 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1016 m_adj(m, ETHER_ALIGN);
1017
1018 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1019 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1020 if (error != 0) {
1021 m_freem(m);
1022 return (error);
1023 }
1024
1025	if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1026 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1027 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1028 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1029 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1030 }
1031 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1032 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1033 sc->bge_cdata.bge_rx_jumbo_sparemap;
1034 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1035 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1036 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1037 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1038 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1039 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1040
1041 /*
1042 * Fill in the extended RX buffer descriptor.
1043 */
1044 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1045 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1046 r->bge_idx = i;
1047 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
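	/*
	 * The cases below intentionally fall through: with nsegs == 4 the
	 * code fills addr3 down to addr0, with 3 it fills addr2..addr0,
	 * and so on, so every present segment gets its address and length.
	 */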
1048 switch (nsegs) {
1049 case 4:
1050 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1051 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1052 r->bge_len3 = segs[3].ds_len;
1053 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1054 case 3:
1055 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1056 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1057 r->bge_len2 = segs[2].ds_len;
1058 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1059 case 2:
1060 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1061 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1062 r->bge_len1 = segs[1].ds_len;
1063 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1064 case 1:
1065 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1066 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1067 r->bge_len0 = segs[0].ds_len;
1068 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1069 break;
1070 default:
1071 panic("%s: %d segments\n", __func__, nsegs);
1072 }
1073
1074 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1075 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1076
1077 return (0);
1078}
1079
1080static int
1081bge_init_rx_ring_std(struct bge_softc *sc)
1082{
1083 int error, i;
1084
1085 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1086 sc->bge_std = 0;
1087 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1088 if ((error = bge_newbuf_std(sc, i)) != 0)
1089 return (error);
1090 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1091 }
1092
1093 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1094 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1095
1096 sc->bge_std = 0;
1097 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1098
1099 return (0);
1100}
1101
1102static void
1103bge_free_rx_ring_std(struct bge_softc *sc)
1104{
1105 int i;
1106
1107 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1108 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1109 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1110 sc->bge_cdata.bge_rx_std_dmamap[i],
1111 BUS_DMASYNC_POSTREAD);
1112 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1113 sc->bge_cdata.bge_rx_std_dmamap[i]);
1114 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1115 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1116 }
1117 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1118 sizeof(struct bge_rx_bd));
1119 }
1120}
1121
1122static int
1123bge_init_rx_ring_jumbo(struct bge_softc *sc)
1124{
1125 struct bge_rcb *rcb;
1126 int error, i;
1127
1128 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1129 sc->bge_jumbo = 0;
1130 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1131 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1132 return (error);
1133 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1134 }
1135
1136 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1137 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1138
1139 sc->bge_jumbo = 0;
1140
1141 /* Enable the jumbo receive producer ring. */
1142 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1143 rcb->bge_maxlen_flags =
1144 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1145 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1146
1147 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1148
1149 return (0);
1150}
1151
1152static void
1153bge_free_rx_ring_jumbo(struct bge_softc *sc)
1154{
1155 int i;
1156
1157 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1158 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1159 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1160 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1161 BUS_DMASYNC_POSTREAD);
1162 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1163 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1164 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1165 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1166 }
1167 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1168 sizeof(struct bge_extrx_bd));
1169 }
1170}
1171
1172static void
1173bge_free_tx_ring(struct bge_softc *sc)
1174{
1175 int i;
1176
1177 if (sc->bge_ldata.bge_tx_ring == NULL)
1178 return;
1179
1180 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1181 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1182 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1183 sc->bge_cdata.bge_tx_dmamap[i],
1184 BUS_DMASYNC_POSTWRITE);
1185 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1186 sc->bge_cdata.bge_tx_dmamap[i]);
1187 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1188 sc->bge_cdata.bge_tx_chain[i] = NULL;
1189 }
1190 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1191 sizeof(struct bge_tx_bd));
1192 }
1193}
1194
1195static int
1196bge_init_tx_ring(struct bge_softc *sc)
1197{
1198 sc->bge_txcnt = 0;
1199 sc->bge_tx_saved_considx = 0;
1200
1201 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1202 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1203 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1204
1205 /* Initialize transmit producer index for host-memory send ring. */
1206 sc->bge_tx_prodidx = 0;
1207 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1208
1209 /* 5700 b2 errata */
1210 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1211 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1212
1213 /* NIC-memory send ring not used; initialize to zero. */
1214 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1215 /* 5700 b2 errata */
1216 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1217 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1218
1219 return (0);
1220}
1221
1222static void
1223bge_setpromisc(struct bge_softc *sc)
1224{
1225 struct ifnet *ifp;
1226
1227 BGE_LOCK_ASSERT(sc);
1228
1229 ifp = sc->bge_ifp;
1230
1231 /* Enable or disable promiscuous mode as needed. */
1232 if (ifp->if_flags & IFF_PROMISC)
1233 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1234 else
1235 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1236}
1237
1238static void
1239bge_setmulti(struct bge_softc *sc)
1240{
1241 struct ifnet *ifp;
1242 struct ifmultiaddr *ifma;
1243 uint32_t hashes[4] = { 0, 0, 0, 0 };
1244 int h, i;
1245
1246 BGE_LOCK_ASSERT(sc);
1247
1248 ifp = sc->bge_ifp;
1249
1250 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1251 for (i = 0; i < 4; i++)
1252 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1253 return;
1254 }
1255
1256 /* First, zot all the existing filters. */
1257 for (i = 0; i < 4; i++)
1258 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1259
1260 /* Now program new ones. */
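	/*
	 * Mapping used below: the low 7 bits of the little-endian CRC are
	 * kept; bits 6-5 select one of the four 32-bit BGE_MAR registers
	 * and bits 4-0 select the bit within it. For example, h = 0x63
	 * sets bit 3 of hashes[3].
	 */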
1261 if_maddr_rlock(ifp);
1262 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1263 if (ifma->ifma_addr->sa_family != AF_LINK)
1264 continue;
1265 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1266 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1267 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1268 }
1269 if_maddr_runlock(ifp);
1270
1271 for (i = 0; i < 4; i++)
1272 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1273}
1274
1275static void
1276bge_setvlan(struct bge_softc *sc)
1277{
1278 struct ifnet *ifp;
1279
1280 BGE_LOCK_ASSERT(sc);
1281
1282 ifp = sc->bge_ifp;
1283
1284 /* Enable or disable VLAN tag stripping as needed. */
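	/*
	 * Note the inverted sense: BGE_RXMODE_RX_KEEP_VLAN_DIAG tells the
	 * MAC to leave the tag in the received frame, so the bit is
	 * cleared when the stack asks for hardware tag stripping and set
	 * when it does not.
	 */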
1285 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1286 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1287 else
1288 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1289}
1290
1291static void
1292bge_sig_pre_reset(struct bge_softc *sc, int type)
1293{
1294
1295 /*
1296 * Some chips don't like this so only do this if ASF is enabled
1297 */
1298 if (sc->bge_asf_mode)
1299 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1300
1301 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1302 switch (type) {
1303 case BGE_RESET_START:
1304 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1305 break;
1306 case BGE_RESET_STOP:
1307 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1308 break;
1309 }
1310 }
1311}
1312
1313static void
1314bge_sig_post_reset(struct bge_softc *sc, int type)
1315{
1316
1317 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1318 switch (type) {
1319 case BGE_RESET_START:
1320 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1321 /* START DONE */
1322 break;
1323 case BGE_RESET_STOP:
1324 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1325 break;
1326 }
1327 }
1328}
1329
1330static void
1331bge_sig_legacy(struct bge_softc *sc, int type)
1332{
1333
1334 if (sc->bge_asf_mode) {
1335 switch (type) {
1336 case BGE_RESET_START:
1337 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1338 break;
1339 case BGE_RESET_STOP:
1340 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1341 break;
1342 }
1343 }
1344}
1345
1346static void
1347bge_stop_fw(struct bge_softc *sc)
1348{
1349 int i;
1350
1351 if (sc->bge_asf_mode) {
1352 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1353 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1354 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1355
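		/*
		 * Give the firmware up to roughly 1ms (100 iterations of
		 * 10us) to clear the event bit before giving up.
		 */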
1356 for (i = 0; i < 100; i++ ) {
1357 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1358 break;
1359 DELAY(10);
1360 }
1361 }
1362}
1363
1364/*
1365 * Do endian, PCI and DMA initialization.
1366 */
1367static int
1368bge_chipinit(struct bge_softc *sc)
1369{
1370 uint32_t dma_rw_ctl, misc_ctl;
1371 uint16_t val;
1372 int i;
1373
1374 /* Set endianness before we access any non-PCI registers. */
1375 misc_ctl = BGE_INIT;
1376 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1377 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1378 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1379
1380 /* Clear the MAC control register */
1381 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1382
1383 /*
1384 * Clear the MAC statistics block in the NIC's
1385 * internal memory.
1386 */
1387 for (i = BGE_STATS_BLOCK;
1388 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1389 BGE_MEMWIN_WRITE(sc, i, 0);
1390
1391 for (i = BGE_STATUS_BLOCK;
1392 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1393 BGE_MEMWIN_WRITE(sc, i, 0);
1394
1395 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1396 /*
1397 * Fix data corruption caused by non-qword write with WB.
1398 * Fix master abort in PCI mode.
1399 * Fix PCI latency timer.
1400 */
1401 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1402 val |= (1 << 10) | (1 << 12) | (1 << 13);
1403 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1404 }
1405
1406 /*
1407 * Set up the PCI DMA control register.
1408 */
1409 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1410 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1411 if (sc->bge_flags & BGE_FLAG_PCIE) {
1412 /* Read watermark not used, 128 bytes for write. */
1413 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1414 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1415 if (BGE_IS_5714_FAMILY(sc)) {
1416 /* 256 bytes for read and write. */
1417 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1418 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1419 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1420 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1421 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1422 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1423 /*
1424 * In the BCM5703, the DMA read watermark should
1425 * be set to less than or equal to the maximum
1426 * memory read byte count of the PCI-X command
1427 * register.
1428 */
1429 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1430 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1431 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1432 /* 1536 bytes for read, 384 bytes for write. */
1433 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1434 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1435 } else {
1436 /* 384 bytes for read and write. */
1437 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1438 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1439 0x0F;
1440 }
1441 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1442 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1443 uint32_t tmp;
1444
1445 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1446 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1447 if (tmp == 6 || tmp == 7)
1448 dma_rw_ctl |=
1449 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1450
1451 /* Set PCI-X DMA write workaround. */
1452 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1453 }
1454 } else {
1455 /* Conventional PCI bus: 256 bytes for read and write. */
1456 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1457 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1458
1459 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1460 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1461 dma_rw_ctl |= 0x0F;
1462 }
1463 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1464 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1465 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1466 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1467 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1468 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1469 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1470 if (BGE_IS_5717_PLUS(sc))
1471 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1472 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1473
1474 /*
1475 * Set up general mode register.
1476 */
1477 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1478 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1479 BGE_MODECTL_TX_NO_PHDR_CSUM);
1480
1481 /*
1482	 * The BCM5701 B5 has a bug causing data corruption when using
1483 * 64-bit DMA reads, which can be terminated early and then
1484 * completed later as 32-bit accesses, in combination with
1485 * certain bridges.
1486 */
1487 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1488 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1489 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1490
1491 /*
1492 * Tell the firmware the driver is running
1493 */
1494 if (sc->bge_asf_mode & ASF_STACKUP)
1495 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1496
1497 /*
1498 * Disable memory write invalidate. Apparently it is not supported
1499 * properly by these devices. Also ensure that INTx isn't disabled,
1500 * as these chips need it even when using MSI.
1501 */
1502 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1503 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1504
1505	/* Set the timer prescaler (always 66 MHz) */
1506 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1507
1508 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1509 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1510 DELAY(40); /* XXX */
1511
1512 /* Put PHY into ready state */
1513 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1514 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1515 DELAY(40);
1516 }
1517
1518 return (0);
1519}
1520
1521static int
1522bge_blockinit(struct bge_softc *sc)
1523{
1524 struct bge_rcb *rcb;
1525 bus_size_t vrcb;
1526 bge_hostaddr taddr;
1527 uint32_t val;
1528 int i, limit;
1529
1530 /*
1531 * Initialize the memory window pointer register so that
1532 * we can access the first 32K of internal NIC RAM. This will
1533 * allow us to set up the TX send ring RCBs and the RX return
1534 * ring RCBs, plus other things which live in NIC memory.
1535 */
1536 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1537
1538 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1539
1540 if (!(BGE_IS_5705_PLUS(sc))) {
1541 /* Configure mbuf memory pool */
1542 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1543 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1544 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1545 else
1546 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1547
1548 /* Configure DMA resource pool */
1549 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1550 BGE_DMA_DESCRIPTORS);
1551 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1552 }
1553
1554 /* Configure mbuf pool watermarks */
1555 if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
1556 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1557 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1558 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1559 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1560 } else {
1561 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1562 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1563 }
1564 } else if (!BGE_IS_5705_PLUS(sc)) {
1565 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1566 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1567 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1568 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1569 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1570 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1571 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1572 } else {
1573 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1574 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1575 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1576 }
1577
1578 /* Configure DMA resource watermarks */
1579 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1580 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1581
1582 /* Enable buffer manager */
1583 if (!(BGE_IS_5705_PLUS(sc))) {
1584 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1585 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1586
1587 /* Poll for buffer manager start indication */
1588 for (i = 0; i < BGE_TIMEOUT; i++) {
1589 DELAY(10);
1590 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1591 break;
1592 }
1593
1594 if (i == BGE_TIMEOUT) {
1595 device_printf(sc->bge_dev,
1596 "buffer manager failed to start\n");
1597 return (ENXIO);
1598 }
1599 }
1600
1601 /* Enable flow-through queues */
1602 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1603 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1604
1605 /* Wait until queue initialization is complete */
1606 for (i = 0; i < BGE_TIMEOUT; i++) {
1607 DELAY(10);
1608 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1609 break;
1610 }
1611
1612 if (i == BGE_TIMEOUT) {
1613 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1614 return (ENXIO);
1615 }
1616
1617 /*
1618 * Summary of rings supported by the controller:
1619 *
1620 * Standard Receive Producer Ring
1621 * - This ring is used to feed receive buffers for "standard"
1622 * sized frames (typically 1536 bytes) to the controller.
1623 *
1624 * Jumbo Receive Producer Ring
1625 * - This ring is used to feed receive buffers for jumbo sized
1626 * frames (i.e. anything bigger than the "standard" frames)
1627 * to the controller.
1628 *
1629 * Mini Receive Producer Ring
1630 * - This ring is used to feed receive buffers for "mini"
1631 * sized frames to the controller.
1632 * - This feature required external memory for the controller
1633 * but was never used in a production system. Should always
1634 * be disabled.
1635 *
1636 * Receive Return Ring
1637 * - After the controller has placed an incoming frame into a
1638 * receive buffer that buffer is moved into a receive return
1639	 * ring.  The driver is then responsible for passing the
1640 * buffer up to the stack. Many versions of the controller
1641 * support multiple RR rings.
1642 *
1643 * Send Ring
1644 * - This ring is used for outgoing frames. Many versions of
1645 * the controller support multiple send rings.
1646 */
1647
1648 /* Initialize the standard receive producer ring control block. */
1649 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1650 rcb->bge_hostaddr.bge_addr_lo =
1651 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1652 rcb->bge_hostaddr.bge_addr_hi =
1653 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1654 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1655 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1656 if (BGE_IS_5717_PLUS(sc)) {
1657 /*
1658 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1659 * Bits 15-2 : Maximum RX frame size
1660		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1661 * Bit 0 : Reserved
1662 */
1663 rcb->bge_maxlen_flags =
1664 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
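		/*
		 * Assuming the usual BGE_RCB_MAXLEN_FLAGS(maxlen, flags)
		 * definition of ((maxlen) << 16 | (flags)), this places the
		 * ring size of 512 in bits 31-16 and the maximum frame
		 * size, pre-shifted into the bits 15-2 field, below it.
		 */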
1665 } else if (BGE_IS_5705_PLUS(sc)) {
1666 /*
1667 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1668 * Bits 15-2 : Reserved (should be 0)
1669 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1670 * Bit 0 : Reserved
1671 */
1672 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1673 } else {
1674 /*
1675 * Ring size is always XXX entries
1676 * Bits 31-16: Maximum RX frame size
1677 * Bits 15-2 : Reserved (should be 0)
1678 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1679 * Bit 0 : Reserved
1680 */
1681 rcb->bge_maxlen_flags =
1682 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1683 }
1684 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1685 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1686 else
1687 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1688 /* Write the standard receive producer ring control block. */
1689 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1690 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1691 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1692 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1693
1694 /* Reset the standard receive producer ring producer index. */
1695 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1696
1697 /*
1698 * Initialize the jumbo RX producer ring control
1699 * block. We set the 'ring disabled' bit in the
1700 * flags field until we're actually ready to start
1701 * using this ring (i.e. once we set the MTU
1702 * high enough to require it).
1703 */
1704 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1705 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1706 /* Get the jumbo receive producer ring RCB parameters. */
1707 rcb->bge_hostaddr.bge_addr_lo =
1708 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1709 rcb->bge_hostaddr.bge_addr_hi =
1710 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1711 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1712 sc->bge_cdata.bge_rx_jumbo_ring_map,
1713 BUS_DMASYNC_PREREAD);
1714 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1715 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1716 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1717 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1718 else
1719 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1720 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1721 rcb->bge_hostaddr.bge_addr_hi);
1722 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1723 rcb->bge_hostaddr.bge_addr_lo);
1724 /* Program the jumbo receive producer ring RCB parameters. */
1725 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1726 rcb->bge_maxlen_flags);
1727 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1728 /* Reset the jumbo receive producer ring producer index. */
1729 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1730 }
1731
1732 /* Disable the mini receive producer ring RCB. */
1733 if (BGE_IS_5700_FAMILY(sc)) {
1734 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1735 rcb->bge_maxlen_flags =
1736 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1737 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1738 rcb->bge_maxlen_flags);
1739 /* Reset the mini receive producer ring producer index. */
1740 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1741 }
1742
1743 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1744 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1745 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1746 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1747 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1748 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1749 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1750 }
1751 /*
1752 * The BD ring replenish thresholds control how often the
1753 * hardware fetches new BD's from the producer rings in host
1754 * memory. Setting the value too low on a busy system can
1755	 * starve the hardware and reduce the throughput.
1756 *
1757	 * Set the BD ring replenish thresholds. The recommended
1758 * values are 1/8th the number of descriptors allocated to
1759 * each ring.
1760 * XXX The 5754 requires a lower threshold, so it might be a
1761 * requirement of all 575x family chips. The Linux driver sets
1762 * the lower threshold for all 5705 family chips as well, but there
1763 * are reports that it might not need to be so strict.
1764 *
1765 * XXX Linux does some extra fiddling here for the 5906 parts as
1766 * well.
1767 */
1768 if (BGE_IS_5705_PLUS(sc))
1769 val = 8;
1770 else
1771 val = BGE_STD_RX_RING_CNT / 8;
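	/* With the usual BGE_STD_RX_RING_CNT of 512 this works out to 64. */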
1772 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1773 if (BGE_IS_JUMBO_CAPABLE(sc))
1774 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1775 BGE_JUMBO_RX_RING_CNT/8);
1776 if (BGE_IS_5717_PLUS(sc)) {
1777 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1778 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1779 }
1780
1781 /*
1782 * Disable all send rings by setting the 'ring disabled' bit
1783 * in the flags field of all the TX send ring control blocks,
1784 * located in NIC memory.
1785 */
1786 if (!BGE_IS_5705_PLUS(sc))
1787 /* 5700 to 5704 had 16 send rings. */
1788 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1789 else
1790 limit = 1;
1791 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1792 for (i = 0; i < limit; i++) {
1793 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1794 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1795 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1796 vrcb += sizeof(struct bge_rcb);
1797 }
1798
1799 /* Configure send ring RCB 0 (we use only the first ring) */
1800 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1801 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1802 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1803 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1804 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1805 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1806 else
1807 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1808 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1809 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1810 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1811
1812 /*
1813 * Disable all receive return rings by setting the
1814	 * 'ring disabled' bit in the flags field of all the receive
1815 * return ring control blocks, located in NIC memory.
1816 */
1817 if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
1818 /* Should be 17, use 16 until we get an SRAM map. */
1819 limit = 16;
1820 } else if (!BGE_IS_5705_PLUS(sc))
1821 limit = BGE_RX_RINGS_MAX;
1822 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
1823 limit = 4;
1824 else
1825 limit = 1;
1826 /* Disable all receive return rings. */
1827 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1828 for (i = 0; i < limit; i++) {
1829 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1830 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1831 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1832 BGE_RCB_FLAG_RING_DISABLED);
1833 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1834 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1835 (i * (sizeof(uint64_t))), 0);
1836 vrcb += sizeof(struct bge_rcb);
1837 }
1838
1839 /*
1840 * Set up receive return ring 0. Note that the NIC address
1841 * for RX return rings is 0x0. The return rings live entirely
1842 * within the host, so the nicaddr field in the RCB isn't used.
1843 */
1844 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1845 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1846 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1847 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1848 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1849 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1850 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1851
1852 /* Set random backoff seed for TX */
1853 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1854 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1855 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1856 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1857 BGE_TX_BACKOFF_SEED_MASK);
1858
1859 /* Set inter-packet gap */
1860 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1861
1862 /*
1863 * Specify which ring to use for packets that don't match
1864 * any RX rules.
1865 */
1866 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1867
1868 /*
1869 * Configure number of RX lists. One interrupt distribution
1870 * list, sixteen active lists, one bad frames class.
1871 */
1872 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1873
1874	/* Initialize RX list placement stats mask. */
1875 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1876 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1877
1878 /* Disable host coalescing until we get it set up */
1879 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1880
1881 /* Poll to make sure it's shut down. */
1882 for (i = 0; i < BGE_TIMEOUT; i++) {
1883 DELAY(10);
1884 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1885 break;
1886 }
1887
1888 if (i == BGE_TIMEOUT) {
1889 device_printf(sc->bge_dev,
1890 "host coalescing engine failed to idle\n");
1891 return (ENXIO);
1892 }
1893
1894 /* Set up host coalescing defaults */
1895 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1896 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1897 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1898 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1899 if (!(BGE_IS_5705_PLUS(sc))) {
1900 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1901 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1902 }
1903 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1904 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1905
1906 /* Set up address of statistics block */
1907 if (!(BGE_IS_5705_PLUS(sc))) {
1908 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1909 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1910 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1911 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1912 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1913 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1914 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1915 }
1916
1917 /* Set up address of status block */
1918 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1919 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1920 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1921 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1922
1923 /* Set up status block size. */
1924 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1925 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1926 val = BGE_STATBLKSZ_FULL;
1927 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1928 } else {
1929 val = BGE_STATBLKSZ_32BYTE;
1930 bzero(sc->bge_ldata.bge_status_block, 32);
1931 }
1932 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1933 sc->bge_cdata.bge_status_map,
1934 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1935
1936 /* Turn on host coalescing state machine */
1937 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1938
1939 /* Turn on RX BD completion state machine and enable attentions */
1940 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1941 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1942
1943 /* Turn on RX list placement state machine */
1944 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1945
1946 /* Turn on RX list selector state machine. */
1947 if (!(BGE_IS_5705_PLUS(sc)))
1948 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1949
1950 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1951 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1952 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1953 BGE_MACMODE_FRMHDR_DMA_ENB;
1954
1955 if (sc->bge_flags & BGE_FLAG_TBI)
1956 val |= BGE_PORTMODE_TBI;
1957 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1958 val |= BGE_PORTMODE_GMII;
1959 else
1960 val |= BGE_PORTMODE_MII;
1961
1962 /* Turn on DMA, clear stats */
1963 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1964
1965 /* Set misc. local control, enable interrupts on attentions */
1966 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1967
1968#ifdef notdef
1969 /* Assert GPIO pins for PHY reset */
1970 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1971 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1972 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1973 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1974#endif
1975
1976 /* Turn on DMA completion state machine */
1977 if (!(BGE_IS_5705_PLUS(sc)))
1978 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1979
1980 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1981
1982 /* Enable host coalescing bug fix. */
1983 if (BGE_IS_5755_PLUS(sc))
1984 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1985
1986 /* Request larger DMA burst size to get better performance. */
1987 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
1988 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1989
1990 /* Turn on write DMA state machine */
1991 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1992 DELAY(40);
1993
1994 /* Turn on read DMA state machine */
1995 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1996
1997 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1998 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
1999
2000 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2001 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2002 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2003 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2004 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2005 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2006 if (sc->bge_flags & BGE_FLAG_PCIE)
2007 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2008 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2009 val |= BGE_RDMAMODE_TSO4_ENABLE;
2010 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2011 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2012 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2013 val |= BGE_RDMAMODE_TSO6_ENABLE;
2014 }
2015 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2016 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2017 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2018 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2019 BGE_IS_5717_PLUS(sc)) {
2020 /*
2021 * Enable fix for read DMA FIFO overruns.
2022 * The fix is to limit the number of RX BDs
2023		 * the hardware would fetch at a time.
2024 */
2025 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
2026 CSR_READ_4(sc, BGE_RDMA_RSRVCTRL) |
2027 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2028 }
2029 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2030 DELAY(40);
2031
2032 /* Turn on RX data completion state machine */
2033 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2034
2035 /* Turn on RX BD initiator state machine */
2036 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2037
2038 /* Turn on RX data and RX BD initiator state machine */
2039 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2040
2041 /* Turn on Mbuf cluster free state machine */
2042 if (!(BGE_IS_5705_PLUS(sc)))
2043 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2044
2045 /* Turn on send BD completion state machine */
2046 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2047
2048 /* Turn on send data completion state machine */
2049 val = BGE_SDCMODE_ENABLE;
2050 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2051 val |= BGE_SDCMODE_CDELAY;
2052 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2053
2054 /* Turn on send data initiator state machine */
2055 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2056 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2057 BGE_SDIMODE_HW_LSO_PRE_DMA);
2058 else
2059 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2060
2061 /* Turn on send BD initiator state machine */
2062 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2063
2064 /* Turn on send BD selector state machine */
2065 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2066
2067 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2068 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2069 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2070
2071 /* ack/clear link change events */
2072 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2073 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2074 BGE_MACSTAT_LINK_CHANGED);
2075 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2076
2077 /*
2078 * Enable attention when the link has changed state for
2079 * devices that use auto polling.
2080 */
2081 if (sc->bge_flags & BGE_FLAG_TBI) {
2082 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2083 } else {
2084 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2085 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2086 DELAY(80);
2087 }
2088 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2089 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2090 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2091 BGE_EVTENB_MI_INTERRUPT);
2092 }
2093
2094 /*
2095 * Clear any pending link state attention.
2096 * Otherwise some link state change events may be lost until attention
2097 * is cleared by bge_intr() -> bge_link_upd() sequence.
2098 * It's not necessary on newer BCM chips - perhaps enabling link
2099 * state change attentions implies clearing pending attention.
2100 */
2101 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2102 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2103 BGE_MACSTAT_LINK_CHANGED);
2104
2105 /* Enable link state change attentions. */
2106 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2107
2108 return (0);
2109}
2110
2111const struct bge_revision *
2112bge_lookup_rev(uint32_t chipid)
2113{
2114 const struct bge_revision *br;
2115
2116 for (br = bge_revisions; br->br_name != NULL; br++) {
2117 if (br->br_chipid == chipid)
2118 return (br);
2119 }
2120
2121 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2122 if (br->br_chipid == BGE_ASICREV(chipid))
2123 return (br);
2124 }
2125
2126 return (NULL);
2127}
2128
2129const struct bge_vendor *
2130bge_lookup_vendor(uint16_t vid)
2131{
2132 const struct bge_vendor *v;
2133
2134 for (v = bge_vendors; v->v_name != NULL; v++)
2135 if (v->v_id == vid)
2136 return (v);
2137
2138 panic("%s: unknown vendor %d", __func__, vid);
2139 return (NULL);
2140}
2141
2142/*
2143 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2144 * against our list and return its name if we find a match.
2145 *
2146 * Note that since the Broadcom controller contains VPD support, we
2147 * try to get the device name string from the controller itself instead
2148 * of the compiled-in string. It guarantees we'll always announce the
2149 * right product name. We fall back to the compiled-in string when
2150 * VPD is unavailable or corrupt.
2151 */
2152static int
2153bge_probe(device_t dev)
2154{
2155 char buf[96];
2156 char model[64];
2157 const struct bge_revision *br;
2158 const char *pname;
2159 struct bge_softc *sc = device_get_softc(dev);
2160 const struct bge_type *t = bge_devs;
2161 const struct bge_vendor *v;
2162 uint32_t id;
2163 uint16_t did, vid;
2164
2165 sc->bge_dev = dev;
2166 vid = pci_get_vendor(dev);
2167 did = pci_get_device(dev);
2168 while(t->bge_vid != 0) {
2169 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2170 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2171 BGE_PCIMISCCTL_ASICREV_SHIFT;
2172 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2173 /*
2174				 * Find the ASIC revision. Different chips
2175 * use different registers.
2176 */
2177 switch (pci_get_device(dev)) {
2178 case BCOM_DEVICEID_BCM5717:
2179 case BCOM_DEVICEID_BCM5718:
2180 id = pci_read_config(dev,
2181 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2182 break;
2183 default:
2184 id = pci_read_config(dev,
2185 BGE_PCI_PRODID_ASICREV, 4);
2186 }
2187 }
2188 br = bge_lookup_rev(id);
2189 v = bge_lookup_vendor(vid);
2190 if (bge_has_eaddr(sc) &&
2191 pci_get_vpd_ident(dev, &pname) == 0)
2192 snprintf(model, 64, "%s", pname);
2193 else
2194 snprintf(model, 64, "%s %s", v->v_name,
2195 br != NULL ? br->br_name :
2196 "NetXtreme Ethernet Controller");
2197 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2198 br != NULL ? "" : "unknown ", id);
2199 device_set_desc_copy(dev, buf);
2200 return (0);
2201 }
2202 t++;
2203 }
2204
2205 return (ENXIO);
2206}
2207
2208static void
2209bge_dma_free(struct bge_softc *sc)
2210{
2211 int i;
2212
2213 /* Destroy DMA maps for RX buffers. */
2214 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2215 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2216 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2217 sc->bge_cdata.bge_rx_std_dmamap[i]);
2218 }
2219 if (sc->bge_cdata.bge_rx_std_sparemap)
2220 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2221 sc->bge_cdata.bge_rx_std_sparemap);
2222
2223 /* Destroy DMA maps for jumbo RX buffers. */
2224 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2225 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2226 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2227 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2228 }
2229 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2230 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2231 sc->bge_cdata.bge_rx_jumbo_sparemap);
2232
2233 /* Destroy DMA maps for TX buffers. */
2234 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2235 if (sc->bge_cdata.bge_tx_dmamap[i])
2236 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2237 sc->bge_cdata.bge_tx_dmamap[i]);
2238 }
2239
2240 if (sc->bge_cdata.bge_rx_mtag)
2241 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2242 if (sc->bge_cdata.bge_tx_mtag)
2243 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2244
2245
2246 /* Destroy standard RX ring. */
2247 if (sc->bge_cdata.bge_rx_std_ring_map)
2248 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2249 sc->bge_cdata.bge_rx_std_ring_map);
2250 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2251 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2252 sc->bge_ldata.bge_rx_std_ring,
2253 sc->bge_cdata.bge_rx_std_ring_map);
2254
2255 if (sc->bge_cdata.bge_rx_std_ring_tag)
2256 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2257
2258 /* Destroy jumbo RX ring. */
2259 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2260 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2261 sc->bge_cdata.bge_rx_jumbo_ring_map);
2262
2263 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2264 sc->bge_ldata.bge_rx_jumbo_ring)
2265 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2266 sc->bge_ldata.bge_rx_jumbo_ring,
2267 sc->bge_cdata.bge_rx_jumbo_ring_map);
2268
2269 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2270 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2271
2272 /* Destroy RX return ring. */
2273 if (sc->bge_cdata.bge_rx_return_ring_map)
2274 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2275 sc->bge_cdata.bge_rx_return_ring_map);
2276
2277 if (sc->bge_cdata.bge_rx_return_ring_map &&
2278 sc->bge_ldata.bge_rx_return_ring)
2279 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2280 sc->bge_ldata.bge_rx_return_ring,
2281 sc->bge_cdata.bge_rx_return_ring_map);
2282
2283 if (sc->bge_cdata.bge_rx_return_ring_tag)
2284 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2285
2286 /* Destroy TX ring. */
2287 if (sc->bge_cdata.bge_tx_ring_map)
2288 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2289 sc->bge_cdata.bge_tx_ring_map);
2290
2291 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2292 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2293 sc->bge_ldata.bge_tx_ring,
2294 sc->bge_cdata.bge_tx_ring_map);
2295
2296 if (sc->bge_cdata.bge_tx_ring_tag)
2297 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2298
2299 /* Destroy status block. */
2300 if (sc->bge_cdata.bge_status_map)
2301 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2302 sc->bge_cdata.bge_status_map);
2303
2304 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2305 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2306 sc->bge_ldata.bge_status_block,
2307 sc->bge_cdata.bge_status_map);
2308
2309 if (sc->bge_cdata.bge_status_tag)
2310 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2311
2312 /* Destroy statistics block. */
2313 if (sc->bge_cdata.bge_stats_map)
2314 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2315 sc->bge_cdata.bge_stats_map);
2316
2317 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2318 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2319 sc->bge_ldata.bge_stats,
2320 sc->bge_cdata.bge_stats_map);
2321
2322 if (sc->bge_cdata.bge_stats_tag)
2323 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2324
2325 if (sc->bge_cdata.bge_buffer_tag)
2326 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2327
2328 /* Destroy the parent tag. */
2329 if (sc->bge_cdata.bge_parent_tag)
2330 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2331}
2332
2333static int
2334bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2335 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2336 bus_addr_t *paddr, const char *msg)
2337{
2338 struct bge_dmamap_arg ctx;
2339 bus_addr_t lowaddr;
2340 bus_size_t ring_end;
2341 int error;
2342
2343 lowaddr = BUS_SPACE_MAXADDR;
2344again:
2345 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2346 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2347 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2348 if (error != 0) {
2349 device_printf(sc->bge_dev,
2350 "could not create %s dma tag\n", msg);
2351 return (ENOMEM);
2352 }
2353 /* Allocate DMA'able memory for ring. */
2354 error = bus_dmamem_alloc(*tag, (void **)ring,
2355 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2356 if (error != 0) {
2357 device_printf(sc->bge_dev,
2358 "could not allocate DMA'able memory for %s\n", msg);
2359 return (ENOMEM);
2360 }
2361 /* Load the address of the ring. */
2362 ctx.bge_busaddr = 0;
2363 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2364 &ctx, BUS_DMA_NOWAIT);
2365 if (error != 0) {
2366 device_printf(sc->bge_dev,
2367 "could not load DMA'able memory for %s\n", msg);
2368 return (ENOMEM);
2369 }
2370 *paddr = ctx.bge_busaddr;
2371 ring_end = *paddr + maxsize;
2372 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2373 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2374 /*
2375 * 4GB boundary crossed. Limit maximum allowable DMA
2376 * address space to 32bit and try again.
2377 */
2378 bus_dmamap_unload(*tag, *map);
2379 bus_dmamem_free(*tag, *ring, *map);
2380 bus_dma_tag_destroy(*tag);
2381 if (bootverbose)
2382 device_printf(sc->bge_dev, "4GB boundary crossed, "
2383 "limit DMA address space to 32bit for %s\n", msg);
2384 *ring = NULL;
2385 *tag = NULL;
2386 *map = NULL;
2387 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2388 goto again;
2389 }
2390 return (0);
2391}
2392
2393static int
2394bge_dma_alloc(struct bge_softc *sc)
2395{
2396 bus_addr_t lowaddr;
2388 bus_size_t boundary, sbsz, txsegsz, txmaxsegsz;
2397 bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
2389 int i, error;
2390
2391 lowaddr = BUS_SPACE_MAXADDR;
2392 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2393 lowaddr = BGE_DMA_MAXADDR;
2394 /*
2395 * Allocate the parent bus DMA tag appropriate for PCI.
2396 */
2397 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2398 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2399 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2400 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2401 if (error != 0) {
2402 device_printf(sc->bge_dev,
2403 "could not allocate parent dma tag\n");
2404 return (ENOMEM);
2405 }
2406
2407 /* Create tag for standard RX ring. */
2408 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2409 &sc->bge_cdata.bge_rx_std_ring_tag,
2410 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2411 &sc->bge_cdata.bge_rx_std_ring_map,
2412 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2413 if (error)
2414 return (error);
2415
2416 /* Create tag for RX return ring. */
2417 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2418 &sc->bge_cdata.bge_rx_return_ring_tag,
2419 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2420 &sc->bge_cdata.bge_rx_return_ring_map,
2421 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2422 if (error)
2423 return (error);
2424
2425 /* Create tag for TX ring. */
2426 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2427 &sc->bge_cdata.bge_tx_ring_tag,
2428 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2429 &sc->bge_cdata.bge_tx_ring_map,
2430 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2431 if (error)
2432 return (error);
2433
2434 /*
2435 * Create tag for status block.
2436	 * Because we only use a single Tx/Rx/Rx return ring, use the
2437	 * minimum status block size, except on BCM5700 AX/BX, which
2438	 * seems to want to see the full status block size regardless
2439	 * of the configured number of rings.
2440 */
2441 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2442 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2443 sbsz = BGE_STATUS_BLK_SZ;
2444 else
2445 sbsz = 32;
2446 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2447 &sc->bge_cdata.bge_status_tag,
2448 (uint8_t **)&sc->bge_ldata.bge_status_block,
2449 &sc->bge_cdata.bge_status_map,
2450 &sc->bge_ldata.bge_status_block_paddr, "status block");
2451 if (error)
2452 return (error);
2453
2454 /* Create tag for statistics block. */
2455 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2456 &sc->bge_cdata.bge_stats_tag,
2457 (uint8_t **)&sc->bge_ldata.bge_stats,
2458 &sc->bge_cdata.bge_stats_map,
2459 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2460 if (error)
2461 return (error);
2462
2463 /* Create tag for jumbo RX ring. */
2464 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2465 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2466 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2467 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2468 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2469 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2470 if (error)
2471 return (error);
2472 }
2473
2474 /* Create parent tag for buffers. */
2475 boundary = 0;
2476 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2477 boundary = BGE_DMA_BNDRY;
2478 /*
2479 * XXX
2480		 * A watchdog timeout issue was observed on BCM5704 adapters
2481		 * that live behind a PCI-X bridge (e.g. the AMD 8131 PCI-X
2482		 * bridge). Limiting the DMA address space to 32 bits seems
2483		 * to address it.
2484 */
2485 if (sc->bge_flags & BGE_FLAG_PCIX)
2486 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2487 }
2488 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2489 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2490 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2491 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2492 if (error != 0) {
2493 device_printf(sc->bge_dev,
2494 "could not allocate buffer dma tag\n");
2495 return (ENOMEM);
2496 }
2497 /* Create tag for Tx mbufs. */
2498 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2499 txsegsz = BGE_TSOSEG_SZ;
2500 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2501 } else {
2502 txsegsz = MCLBYTES;
2503 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2504 }
2505 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2506 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2507 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2508 &sc->bge_cdata.bge_tx_mtag);
2509
2510 if (error) {
2511 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2512 return (ENOMEM);
2513 }
2514
2515 /* Create tag for Rx mbufs. */
2398 int i, error;
2399
2400 lowaddr = BUS_SPACE_MAXADDR;
2401 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2402 lowaddr = BGE_DMA_MAXADDR;
2403 /*
2404 * Allocate the parent bus DMA tag appropriate for PCI.
2405 */
2406 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2407 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2408 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2409 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2410 if (error != 0) {
2411 device_printf(sc->bge_dev,
2412 "could not allocate parent dma tag\n");
2413 return (ENOMEM);
2414 }
2415
2416 /* Create tag for standard RX ring. */
2417 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2418 &sc->bge_cdata.bge_rx_std_ring_tag,
2419 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2420 &sc->bge_cdata.bge_rx_std_ring_map,
2421 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2422 if (error)
2423 return (error);
2424
2425 /* Create tag for RX return ring. */
2426 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2427 &sc->bge_cdata.bge_rx_return_ring_tag,
2428 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2429 &sc->bge_cdata.bge_rx_return_ring_map,
2430 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2431 if (error)
2432 return (error);
2433
2434 /* Create tag for TX ring. */
2435 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2436 &sc->bge_cdata.bge_tx_ring_tag,
2437 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2438 &sc->bge_cdata.bge_tx_ring_map,
2439 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2440 if (error)
2441 return (error);
2442
2443 /*
2444 * Create tag for status block.
2445	 * Because we only use a single Tx/Rx/Rx return ring, use the
2446	 * minimum status block size, except on BCM5700 AX/BX, which
2447	 * seems to want to see the full status block size regardless
2448	 * of the configured number of rings.
2449 */
2450 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2451 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2452 sbsz = BGE_STATUS_BLK_SZ;
2453 else
2454 sbsz = 32;
2455 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2456 &sc->bge_cdata.bge_status_tag,
2457 (uint8_t **)&sc->bge_ldata.bge_status_block,
2458 &sc->bge_cdata.bge_status_map,
2459 &sc->bge_ldata.bge_status_block_paddr, "status block");
2460 if (error)
2461 return (error);
2462
2463 /* Create tag for statistics block. */
2464 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2465 &sc->bge_cdata.bge_stats_tag,
2466 (uint8_t **)&sc->bge_ldata.bge_stats,
2467 &sc->bge_cdata.bge_stats_map,
2468 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2469 if (error)
2470 return (error);
2471
2472 /* Create tag for jumbo RX ring. */
2473 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2474 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2475 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2476 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2477 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2478 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2479 if (error)
2480 return (error);
2481 }
2482
2483 /* Create parent tag for buffers. */
2484 boundary = 0;
2485 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2486 boundary = BGE_DMA_BNDRY;
2487 /*
2488 * XXX
2489	 * A watchdog timeout issue was observed on BCM5704 controllers
2490	 * that live behind a PCI-X bridge (e.g. the AMD 8131 PCI-X bridge).
2491	 * Limiting the DMA address space to 32 bits seems to address
2492	 * it.
2493 */
2494 if (sc->bge_flags & BGE_FLAG_PCIX)
2495 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2496 }
2497 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2498 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2499 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2500 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2501 if (error != 0) {
2502 device_printf(sc->bge_dev,
2503 "could not allocate buffer dma tag\n");
2504 return (ENOMEM);
2505 }
2506 /* Create tag for Tx mbufs. */
2507 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2508 txsegsz = BGE_TSOSEG_SZ;
2509 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2510 } else {
2511 txsegsz = MCLBYTES;
2512 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2513 }
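	/*
	 * Note: with TSO the driver may be asked to map up to a 65535 byte
	 * IP datagram plus an Ethernet/VLAN header in a single transmit
	 * request, hence the txmaxsegsz chosen above is used as the maximum
	 * map size for the tag created below, while each individual DMA
	 * segment stays limited to txsegsz (BGE_TSOSEG_SZ).
	 */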
2514 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2515 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2516 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2517 &sc->bge_cdata.bge_tx_mtag);
2518
2519 if (error) {
2520 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2521 return (ENOMEM);
2522 }
2523
2524 /* Create tag for Rx mbufs. */
2525 if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
2526 rxmaxsegsz = MJUM9BYTES;
2527 else
2528 rxmaxsegsz = MCLBYTES;
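	/*
	 * With BGE_FLAG_JUMBO_STD set the standard RX ring presumably
	 * carries 9K (MJUM9BYTES) clusters so that a jumbo frame fits in
	 * a single buffer; the Rx mbuf tag created below must therefore
	 * allow a segment that large.  Otherwise a regular 2K (MCLBYTES)
	 * cluster is enough.
	 */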
2516 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2529 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2517 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
2518 MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2530 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
2531 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2519
2520 if (error) {
2521 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2522 return (ENOMEM);
2523 }
2524
2525 /* Create DMA maps for RX buffers. */
2526 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2527 &sc->bge_cdata.bge_rx_std_sparemap);
2528 if (error) {
2529 device_printf(sc->bge_dev,
2530 "can't create spare DMA map for RX\n");
2531 return (ENOMEM);
2532 }
2533 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2534 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2535 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2536 if (error) {
2537 device_printf(sc->bge_dev,
2538 "can't create DMA map for RX\n");
2539 return (ENOMEM);
2540 }
2541 }
2542
2543 /* Create DMA maps for TX buffers. */
2544 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2545 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2546 &sc->bge_cdata.bge_tx_dmamap[i]);
2547 if (error) {
2548 device_printf(sc->bge_dev,
2549 "can't create DMA map for TX\n");
2550 return (ENOMEM);
2551 }
2552 }
2553
2554 /* Create tags for jumbo RX buffers. */
2555 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2556 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2557 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2558 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2559 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2560 if (error) {
2561 device_printf(sc->bge_dev,
2562 "could not allocate jumbo dma tag\n");
2563 return (ENOMEM);
2564 }
2565 /* Create DMA maps for jumbo RX buffers. */
2566 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2567 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2568 if (error) {
2569 device_printf(sc->bge_dev,
2570 "can't create spare DMA map for jumbo RX\n");
2571 return (ENOMEM);
2572 }
2573 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2574 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2575 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2576 if (error) {
2577 device_printf(sc->bge_dev,
2578 "can't create DMA map for jumbo RX\n");
2579 return (ENOMEM);
2580 }
2581 }
2582 }
2583
2584 return (0);
2585}
2586
2587/*
2588 * Return true if this device has more than one port.
2589 */
2590static int
2591bge_has_multiple_ports(struct bge_softc *sc)
2592{
2593 device_t dev = sc->bge_dev;
2594 u_int b, d, f, fscan, s;
2595
2596 d = pci_get_domain(dev);
2597 b = pci_get_bus(dev);
2598 s = pci_get_slot(dev);
2599 f = pci_get_function(dev);
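	/*
	 * Scan every other function number at the same domain/bus/slot;
	 * if any other function is present, the device exposes more than
	 * one port.
	 */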
2600 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2601 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2602 return (1);
2603 return (0);
2604}
2605
2606/*
2607 * Return true if MSI can be used with this device.
2608 */
2609static int
2610bge_can_use_msi(struct bge_softc *sc)
2611{
2612 int can_use_msi = 0;
2613
2614 /* Disable MSI for polling(4). */
2615#ifdef DEVICE_POLLING
2616 return (0);
2617#endif
2618 switch (sc->bge_asicrev) {
2619 case BGE_ASICREV_BCM5714_A0:
2620 case BGE_ASICREV_BCM5714:
2621 /*
2622 * Apparently, MSI doesn't work when these chips are
2623 * configured in single-port mode.
2624 */
2625 if (bge_has_multiple_ports(sc))
2626 can_use_msi = 1;
2627 break;
2628 case BGE_ASICREV_BCM5750:
2629 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2630 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2631 can_use_msi = 1;
2632 break;
2633 default:
2634 if (BGE_IS_575X_PLUS(sc))
2635 can_use_msi = 1;
2636 }
2637 return (can_use_msi);
2638}
2639
2640static int
2641bge_attach(device_t dev)
2642{
2643 struct ifnet *ifp;
2644 struct bge_softc *sc;
2645 uint32_t hwcfg = 0, misccfg;
2646 u_char eaddr[ETHER_ADDR_LEN];
2647 int capmask, error, f, msicount, phy_addr, reg, rid, trys;
2648
2649 sc = device_get_softc(dev);
2650 sc->bge_dev = dev;
2651
2652 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2653
2654 /*
2655 * Map control/status registers.
2656 */
2657 pci_enable_busmaster(dev);
2658
2659 rid = PCIR_BAR(0);
2660 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2661 RF_ACTIVE);
2662
2663 if (sc->bge_res == NULL) {
2664 device_printf (sc->bge_dev, "couldn't map memory\n");
2665 error = ENXIO;
2666 goto fail;
2667 }
2668
2669 /* Save various chip information. */
2670 sc->bge_chipid =
2671 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2672 BGE_PCIMISCCTL_ASICREV_SHIFT;
2673 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2674 /*
2675		 * Find the ASIC revision. Different chips use different
2676 * registers.
2677 */
2678 switch (pci_get_device(dev)) {
2679 case BCOM_DEVICEID_BCM5717:
2680 case BCOM_DEVICEID_BCM5718:
2681 sc->bge_chipid = pci_read_config(dev,
2682 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2683 break;
2684 default:
2685 sc->bge_chipid = pci_read_config(dev,
2686 BGE_PCI_PRODID_ASICREV, 4);
2687 }
2688 }
2689 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2690 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2691
2692 /* Set default PHY address. */
2693 phy_addr = 1;
2694 /*
2695 * PHY address mapping for various devices.
2696 *
2697 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2698 * ---------+-------+-------+-------+-------+
2699 * BCM57XX | 1 | X | X | X |
2700 * BCM5704 | 1 | X | 1 | X |
2701 * BCM5717 | 1 | 8 | 2 | 9 |
2702 *
2703 * Other addresses may respond but they are not
2704 * IEEE compliant PHYs and should be ignored.
2705 */
2706 if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
2707 f = pci_get_function(dev);
2708 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2709 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2710 BGE_SGDIGSTS_IS_SERDES)
2711 phy_addr = f + 8;
2712 else
2713 phy_addr = f + 1;
2714 } else if (sc->bge_chipid == BGE_CHIPID_BCM5717_B0) {
2715 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2716 BGE_CPMU_PHY_STRAP_IS_SERDES)
2717 phy_addr = f + 8;
2718 else
2719 phy_addr = f + 1;
2720 }
2721 }
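	/*
	 * E.g. on a BCM5717, a copper PHY on function 1 is expected at
	 * address 2 (f + 1) and a SerDes one at address 9 (f + 8),
	 * matching the mapping table above.
	 */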
2722
2723 /*
2724 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2725 * 5705 A0 and A1 chips.
2726 */
2727 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2728 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2729 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2730 sc->bge_chipid != BGE_CHIPID_BCM5705_A1 &&
2731 !BGE_IS_5717_PLUS(sc))
2732 sc->bge_phy_flags |= BGE_PHY_WIRESPEED;
2733
2734 if (bge_has_eaddr(sc))
2735 sc->bge_flags |= BGE_FLAG_EADDR;
2736
2737 /* Save chipset family. */
2738 switch (sc->bge_asicrev) {
2739 case BGE_ASICREV_BCM5717:
2740 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2741 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2742 BGE_FLAG_SHORT_DMA_BUG | BGE_FLAG_JUMBO_FRAME;
2743 break;
2744 case BGE_ASICREV_BCM5755:
2745 case BGE_ASICREV_BCM5761:
2746 case BGE_ASICREV_BCM5784:
2747 case BGE_ASICREV_BCM5785:
2748 case BGE_ASICREV_BCM5787:
2749 case BGE_ASICREV_BCM57780:
2750 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2751 BGE_FLAG_5705_PLUS;
2752 break;
2753 case BGE_ASICREV_BCM5700:
2754 case BGE_ASICREV_BCM5701:
2755 case BGE_ASICREV_BCM5703:
2756 case BGE_ASICREV_BCM5704:
2757 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2758 break;
2759 case BGE_ASICREV_BCM5714_A0:
2760 case BGE_ASICREV_BCM5780:
2761 case BGE_ASICREV_BCM5714:
2762 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2775 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
2763 /* FALLTHROUGH */
2764 case BGE_ASICREV_BCM5750:
2765 case BGE_ASICREV_BCM5752:
2766 case BGE_ASICREV_BCM5906:
2767 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2768 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
2769 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2770 /* FALLTHROUGH */
2771 case BGE_ASICREV_BCM5705:
2772 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2773 break;
2774 }
2775
2776 /* Set various PHY bug flags. */
2777 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2778 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2779 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2780 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2781 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2782 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2783 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2784 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2785 if (pci_get_subvendor(dev) == DELL_VENDORID)
2786 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2787 if ((BGE_IS_5705_PLUS(sc)) &&
2788 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2789 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
2790 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2791 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2792 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2793 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2794 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2795 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2796 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2797 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2798 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2799 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2800 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2801 } else
2802 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2803 }
2804
2805	/* Identify the chips that use a CPMU. */
2806 if (BGE_IS_5717_PLUS(sc) ||
2807 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2808 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2809 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2810 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2811 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2812 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2813 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2814 else
2815 sc->bge_mi_mode = BGE_MIMODE_BASE;
2816 /* Enable auto polling for BCM570[0-5]. */
2817 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2818 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2819
2820 /*
2821	 * All controllers that are not 5755 or higher have a 4GB
2822	 * boundary DMA bug.
2823	 * Whenever a DMA address crosses a multiple of the 4GB boundary
2824	 * (4GB, 8GB, 12GB, etc.) and makes the transition
2825	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2826	 * state machine will lock up and cause the device to hang.
2827 */
2828 if (BGE_IS_5755_PLUS(sc) == 0)
2829 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
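	/*
	 * bge_dma_alloc() checks this flag and, when set, creates the
	 * parent buffer DMA tag with a BGE_DMA_BNDRY boundary (and a
	 * 32-bit lowaddr on PCI-X) so that bus_dma never hands the chip
	 * a segment crossing such a boundary.
	 */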
2830
2831 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2832 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2833 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2834 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2835 sc->bge_flags |= BGE_FLAG_5788;
2836 }
2837
2838 capmask = BMSR_DEFCAPMASK;
2839 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
2840 (misccfg == 0x4000 || misccfg == 0x8000)) ||
2841 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2842 pci_get_vendor(dev) == BCOM_VENDORID &&
2843 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
2844 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
2845 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
2846 (pci_get_vendor(dev) == BCOM_VENDORID &&
2847 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
2848 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
2849 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
2850 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
2851 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2852 /* These chips are 10/100 only. */
2853 capmask &= ~BMSR_EXTSTAT;
2854 }
2855
2856 /*
2857	 * Some controllers seem to require special firmware to use
2858	 * TSO. But that firmware is not available to FreeBSD, and Linux
2859	 * claims that the TSO performed by the firmware is slower than
2860	 * hardware-based TSO. Moreover, the firmware-based TSO has a
2861	 * known bug: it can't handle TSO if the ethernet header + IP/TCP
2862	 * header is greater than 80 bytes. A workaround for the TSO
2863	 * bug exists, but it seems more expensive than not using
2864	 * TSO at all. Some hardware also has the TSO bug, so limit
2865	 * TSO to the controllers that are not affected by TSO issues
2866	 * (e.g. 5755 or higher).
2867 */
2868 if (BGE_IS_5717_PLUS(sc)) {
2869 /* BCM5717 requires different TSO configuration. */
2870 sc->bge_flags |= BGE_FLAG_TSO3;
2871 } else if (BGE_IS_5755_PLUS(sc)) {
2872 /*
2873		 * BCM5754 and BCM5787 share the same ASIC id, so an
2874		 * explicit device id check is required.
2875		 * For unknown reasons, TSO does not work on the BCM5755M.
2876 */
2877 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2878 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2879 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
2880 sc->bge_flags |= BGE_FLAG_TSO;
2881 }
2882
2883 /*
2884 * Check if this is a PCI-X or PCI Express device.
2885 */
2886 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
2887 /*
2888		 * Found a PCI Express capabilities register; this
2889		 * must be a PCI Express device.
2890 */
2891 sc->bge_flags |= BGE_FLAG_PCIE;
2892 sc->bge_expcap = reg;
2893 if (pci_get_max_read_req(dev) != 4096)
2894 pci_set_max_read_req(dev, 4096);
2895 } else {
2896 /*
2897 * Check if the device is in PCI-X Mode.
2898 * (This bit is not valid on PCI Express controllers.)
2899 */
2900 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
2901 sc->bge_pcixcap = reg;
2902 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2903 BGE_PCISTATE_PCI_BUSMODE) == 0)
2904 sc->bge_flags |= BGE_FLAG_PCIX;
2905 }
2906
2907 /*
2908 * The 40bit DMA bug applies to the 5714/5715 controllers and is
2909 * not actually a MAC controller bug but an issue with the embedded
2910 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
2911 */
2912 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2913 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2914 /*
2915 * Allocate the interrupt, using MSI if possible. These devices
2916 * support 8 MSI messages, but only the first one is used in
2917 * normal operation.
2918 */
2919 rid = 0;
2920 if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
2921 sc->bge_msicap = reg;
2922 if (bge_can_use_msi(sc)) {
2923 msicount = pci_msi_count(dev);
2924 if (msicount > 1)
2925 msicount = 1;
2926 } else
2927 msicount = 0;
2928 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2929 rid = 1;
2930 sc->bge_flags |= BGE_FLAG_MSI;
2931 }
2932 }
2933
2934 /*
2935	 * All controllers except the BCM5700 support tagged status, but
2936	 * we use tagged status only for the MSI case on BCM5717. Otherwise
2937	 * MSI on BCM5717 does not work.
2938 */
2939#ifndef DEVICE_POLLING
2940 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
2941 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
2942#endif
2943
2944 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2945 RF_SHAREABLE | RF_ACTIVE);
2946
2947 if (sc->bge_irq == NULL) {
2948 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2949 error = ENXIO;
2950 goto fail;
2951 }
2952
2953 device_printf(dev,
2954 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2955 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2956 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2957 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2958
2959 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2960
2961 /* Try to reset the chip. */
2962 if (bge_reset(sc)) {
2963 device_printf(sc->bge_dev, "chip reset failed\n");
2964 error = ENXIO;
2965 goto fail;
2966 }
2967
2968 sc->bge_asf_mode = 0;
2969 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2970 == BGE_MAGIC_NUMBER)) {
2971 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2972 & BGE_HWCFG_ASF) {
2973 sc->bge_asf_mode |= ASF_ENABLE;
2974 sc->bge_asf_mode |= ASF_STACKUP;
2975 if (BGE_IS_575X_PLUS(sc))
2976 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2977 }
2978 }
2979
2980 /* Try to reset the chip again the nice way. */
2981 bge_stop_fw(sc);
2982 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2983 if (bge_reset(sc)) {
2984 device_printf(sc->bge_dev, "chip reset failed\n");
2985 error = ENXIO;
2986 goto fail;
2987 }
2988
2989 bge_sig_legacy(sc, BGE_RESET_STOP);
2990 bge_sig_post_reset(sc, BGE_RESET_STOP);
2991
2992 if (bge_chipinit(sc)) {
2993 device_printf(sc->bge_dev, "chip initialization failed\n");
2994 error = ENXIO;
2995 goto fail;
2996 }
2997
2998 error = bge_get_eaddr(sc, eaddr);
2999 if (error) {
3000 device_printf(sc->bge_dev,
3001 "failed to read station address\n");
3002 error = ENXIO;
3003 goto fail;
3004 }
3005
3006 /* 5705 limits RX return ring to 512 entries. */
3007 if (BGE_IS_5717_PLUS(sc))
3008 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3009 else if (BGE_IS_5705_PLUS(sc))
3010 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3011 else
3012 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3013
3014 if (bge_dma_alloc(sc)) {
3015 device_printf(sc->bge_dev,
3016 "failed to allocate DMA resources\n");
3017 error = ENXIO;
3018 goto fail;
3019 }
3020
3021 bge_add_sysctls(sc);
3022
3023 /* Set default tuneable values. */
3024 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3025 sc->bge_rx_coal_ticks = 150;
3026 sc->bge_tx_coal_ticks = 150;
3027 sc->bge_rx_max_coal_bds = 10;
3028 sc->bge_tx_max_coal_bds = 10;
3029
3030 /* Initialize checksum features to use. */
3031 sc->bge_csum_features = BGE_CSUM_FEATURES;
3032 if (sc->bge_forced_udpcsum != 0)
3033 sc->bge_csum_features |= CSUM_UDP;
3034
3035 /* Set up ifnet structure */
3036 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3037 if (ifp == NULL) {
3038 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3039 error = ENXIO;
3040 goto fail;
3041 }
3042 ifp->if_softc = sc;
3043 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3044 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3045 ifp->if_ioctl = bge_ioctl;
3046 ifp->if_start = bge_start;
3047 ifp->if_init = bge_init;
3048 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3049 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3050 IFQ_SET_READY(&ifp->if_snd);
3051 ifp->if_hwassist = sc->bge_csum_features;
3052 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3053 IFCAP_VLAN_MTU;
3054 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3055 ifp->if_hwassist |= CSUM_TSO;
3056 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3057 }
3058#ifdef IFCAP_VLAN_HWCSUM
3059 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3060#endif
3061 ifp->if_capenable = ifp->if_capabilities;
3062#ifdef DEVICE_POLLING
3063 ifp->if_capabilities |= IFCAP_POLLING;
3064#endif
3065
3066 /*
3067 * 5700 B0 chips do not support checksumming correctly due
3068 * to hardware bugs.
3069 */
3070 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3071 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3072 ifp->if_capenable &= ~IFCAP_HWCSUM;
3073 ifp->if_hwassist = 0;
3074 }
3075
3076 /*
3077 * Figure out what sort of media we have by checking the
3078 * hardware config word in the first 32k of NIC internal memory,
3079 * or fall back to examining the EEPROM if necessary.
3080 * Note: on some BCM5700 cards, this value appears to be unset.
3081 * If that's the case, we have to rely on identifying the NIC
3082 * by its PCI subsystem ID, as we do below for the SysKonnect
3083 * SK-9D41.
3084 */
3085 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
3086 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
3087 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3088 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3089 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3090 sizeof(hwcfg))) {
3091 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3092 error = ENXIO;
3093 goto fail;
3094 }
3095 hwcfg = ntohl(hwcfg);
3096 }
3097
3098 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3099 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3100 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3101 if (BGE_IS_5714_FAMILY(sc))
3102 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3103 else
3104 sc->bge_flags |= BGE_FLAG_TBI;
3105 }
3106
3107 if (sc->bge_flags & BGE_FLAG_TBI) {
3108 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3109 bge_ifmedia_sts);
3110 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3111 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3112 0, NULL);
3113 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3114 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3115 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3116 } else {
3117 /*
3118 * Do transceiver setup and tell the firmware the
3119		 * driver is down so we can try to get access to the PHY
3120		 * to probe it if ASF is running. Retry a couple of times
3121 * if we get a conflict with the ASF firmware accessing
3122 * the PHY.
3123 */
3124 trys = 0;
3125 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3126again:
3127 bge_asf_driver_up(sc);
3128
3129 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3130 bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3131 MIIF_DOPAUSE);
3132 if (error != 0) {
3133 if (trys++ < 4) {
3134 device_printf(sc->bge_dev, "Try again\n");
3135 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3136 BMCR_RESET);
3137 goto again;
3138 }
3139 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3140 goto fail;
3141 }
3142
3143 /*
3144 * Now tell the firmware we are going up after probing the PHY
3145 */
3146 if (sc->bge_asf_mode & ASF_STACKUP)
3147 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3148 }
3149
3150 /*
3151 * When using the BCM5701 in PCI-X mode, data corruption has
3152 * been observed in the first few bytes of some received packets.
3153 * Aligning the packet buffer in memory eliminates the corruption.
3154 * Unfortunately, this misaligns the packet payloads. On platforms
3155 * which do not support unaligned accesses, we will realign the
3156 * payloads by copying the received packets.
3157 */
3158 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3159 sc->bge_flags & BGE_FLAG_PCIX)
3160 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3161
3162 /*
3163 * Call MI attach routine.
3164 */
3165 ether_ifattach(ifp, eaddr);
3166 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3167
3168 /* Tell upper layer we support long frames. */
3169 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3170
3171 /*
3172 * Hookup IRQ last.
3173 */
3174 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3175 /* Take advantage of single-shot MSI. */
3176 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3177 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3178 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3179 taskqueue_thread_enqueue, &sc->bge_tq);
3180 if (sc->bge_tq == NULL) {
3181 device_printf(dev, "could not create taskqueue.\n");
3182 ether_ifdetach(ifp);
3183 error = ENXIO;
3184 goto fail;
3185 }
3186 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3187 device_get_nameunit(sc->bge_dev));
3188 error = bus_setup_intr(dev, sc->bge_irq,
3189 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3190 &sc->bge_intrhand);
3191 if (error)
3192 ether_ifdetach(ifp);
3193 } else
3194 error = bus_setup_intr(dev, sc->bge_irq,
3195 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3196 &sc->bge_intrhand);
3197
3198 if (error) {
3199 bge_detach(dev);
3200 device_printf(sc->bge_dev, "couldn't set up irq\n");
3201 }
3202
3203 return (0);
3204
3205fail:
3206 bge_release_resources(sc);
3207
3208 return (error);
3209}
3210
3211static int
3212bge_detach(device_t dev)
3213{
3214 struct bge_softc *sc;
3215 struct ifnet *ifp;
3216
3217 sc = device_get_softc(dev);
3218 ifp = sc->bge_ifp;
3219
3220#ifdef DEVICE_POLLING
3221 if (ifp->if_capenable & IFCAP_POLLING)
3222 ether_poll_deregister(ifp);
3223#endif
3224
3225 BGE_LOCK(sc);
3226 bge_stop(sc);
3227 bge_reset(sc);
3228 BGE_UNLOCK(sc);
3229
3230 callout_drain(&sc->bge_stat_ch);
3231
3232 if (sc->bge_tq)
3233 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3234 ether_ifdetach(ifp);
3235
3236 if (sc->bge_flags & BGE_FLAG_TBI) {
3237 ifmedia_removeall(&sc->bge_ifmedia);
3238 } else {
3239 bus_generic_detach(dev);
3240 device_delete_child(dev, sc->bge_miibus);
3241 }
3242
3243 bge_release_resources(sc);
3244
3245 return (0);
3246}
3247
3248static void
3249bge_release_resources(struct bge_softc *sc)
3250{
3251 device_t dev;
3252
3253 dev = sc->bge_dev;
3254
3255 if (sc->bge_tq != NULL)
3256 taskqueue_free(sc->bge_tq);
3257
3258 if (sc->bge_intrhand != NULL)
3259 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3260
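	/*
	 * The interrupt resource was allocated with rid 1 when MSI is in
	 * use and rid 0 for a legacy INTx line (see bge_attach()), so
	 * release it with the matching rid.
	 */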
3261 if (sc->bge_irq != NULL)
3262 bus_release_resource(dev, SYS_RES_IRQ,
3263 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3264
3265 if (sc->bge_flags & BGE_FLAG_MSI)
3266 pci_release_msi(dev);
3267
3268 if (sc->bge_res != NULL)
3269 bus_release_resource(dev, SYS_RES_MEMORY,
3270 PCIR_BAR(0), sc->bge_res);
3271
3272 if (sc->bge_ifp != NULL)
3273 if_free(sc->bge_ifp);
3274
3275 bge_dma_free(sc);
3276
3277 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3278 BGE_LOCK_DESTROY(sc);
3279}
3280
3281static int
3282bge_reset(struct bge_softc *sc)
3283{
3284 device_t dev;
3285 uint32_t cachesize, command, pcistate, reset, val;
3286 void (*write_op)(struct bge_softc *, int, int);
3287 uint16_t devctl;
3288 int i;
3289
3290 dev = sc->bge_dev;
3291
3292 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3293 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3294 if (sc->bge_flags & BGE_FLAG_PCIE)
3295 write_op = bge_writemem_direct;
3296 else
3297 write_op = bge_writemem_ind;
3298 } else
3299 write_op = bge_writereg_ind;
3300
3301 /* Save some important PCI state. */
3302 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3303 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3304 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3305
3306 pci_write_config(dev, BGE_PCI_MISC_CTL,
3307 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3308 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3309
3310 /* Disable fastboot on controllers that support it. */
3311 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3312 BGE_IS_5755_PLUS(sc)) {
3313 if (bootverbose)
3314 device_printf(dev, "Disabling fastboot\n");
3315 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3316 }
3317
3318 /*
3319 * Write the magic number to SRAM at offset 0xB50.
3320 * When firmware finishes its initialization it will
3321 * write ~BGE_MAGIC_NUMBER to the same location.
3322 */
3323 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3324
3325 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3326
3327 /* XXX: Broadcom Linux driver. */
3328 if (sc->bge_flags & BGE_FLAG_PCIE) {
3329 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3330 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3331 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3332 /* Prevent PCIE link training during global reset */
3333 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3334 reset |= 1 << 29;
3335 }
3336 }
3337
3338 /*
3339 * Set GPHY Power Down Override to leave GPHY
3340 * powered up in D0 uninitialized.
3341 */
3342 if (BGE_IS_5705_PLUS(sc))
3343 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3344
3345 /* Issue global reset */
3346 write_op(sc, BGE_MISC_CFG, reset);
3347
3348 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3349 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3350 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3351 val | BGE_VCPU_STATUS_DRV_RESET);
3352 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3353 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3354 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3355 }
3356
3357 DELAY(1000);
3358
3359 /* XXX: Broadcom Linux driver. */
3360 if (sc->bge_flags & BGE_FLAG_PCIE) {
3361 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3362 DELAY(500000); /* wait for link training to complete */
3363 val = pci_read_config(dev, 0xC4, 4);
3364 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3365 }
3366 devctl = pci_read_config(dev,
3367 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3368		/* Clear the "enable no snoop" and "enable relaxed ordering" bits. */
3369 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3370 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3371 /* Set PCIE max payload size to 128. */
3372 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3373 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3374 devctl, 2);
3375 /* Clear error status. */
3376 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3377 PCIM_EXP_STA_CORRECTABLE_ERROR |
3378 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3379 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3380 }
3381
3382 /* Reset some of the PCI state that got zapped by reset. */
3383 pci_write_config(dev, BGE_PCI_MISC_CTL,
3384 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3385 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3386 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3387 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3388 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3389 /*
3390	 * Disable PCI-X relaxed ordering to ensure the status block update
3391	 * arrives before packet buffer DMA. Otherwise the driver may
3392	 * read a stale status block.
3393 */
3394 if (sc->bge_flags & BGE_FLAG_PCIX) {
3395 devctl = pci_read_config(dev,
3396 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3397 devctl &= ~PCIXM_COMMAND_ERO;
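		/*
		 * While rewriting the PCI-X command register, also raise
		 * the maximum memory read byte count to 2048 on the 5703
		 * and 5704 (and reset the split transaction limit on the
		 * 5704).
		 */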
3398 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3399 devctl &= ~PCIXM_COMMAND_MAX_READ;
3400 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3401 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3402 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3403 PCIXM_COMMAND_MAX_READ);
3404 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3405 }
3406 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3407 devctl, 2);
3408 }
3409	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
3410 if (BGE_IS_5714_FAMILY(sc)) {
3411 /* This chip disables MSI on reset. */
3412 if (sc->bge_flags & BGE_FLAG_MSI) {
3413 val = pci_read_config(dev,
3414 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3415 pci_write_config(dev,
3416 sc->bge_msicap + PCIR_MSI_CTRL,
3417 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3418 val = CSR_READ_4(sc, BGE_MSI_MODE);
3419 CSR_WRITE_4(sc, BGE_MSI_MODE,
3420 val | BGE_MSIMODE_ENABLE);
3421 }
3422 val = CSR_READ_4(sc, BGE_MARB_MODE);
3423 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3424 } else
3425 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3426
3427 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3428 for (i = 0; i < BGE_TIMEOUT; i++) {
3429 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3430 if (val & BGE_VCPU_STATUS_INIT_DONE)
3431 break;
3432 DELAY(100);
3433 }
3434 if (i == BGE_TIMEOUT) {
3435 device_printf(dev, "reset timed out\n");
3436 return (1);
3437 }
3438 } else {
3439 /*
3440 * Poll until we see the 1's complement of the magic number.
3441 * This indicates that the firmware initialization is complete.
3442 * We expect this to fail if no chip containing the Ethernet
3443 * address is fitted though.
3444 */
3445 for (i = 0; i < BGE_TIMEOUT; i++) {
3446 DELAY(10);
3447 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3448 if (val == ~BGE_MAGIC_NUMBER)
3449 break;
3450 }
3451
3452 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3453 device_printf(dev,
3454 "firmware handshake timed out, found 0x%08x\n",
3455 val);
3456 }
3457
3458 /*
3459 * XXX Wait for the value of the PCISTATE register to
3460 * return to its original pre-reset state. This is a
3461 * fairly good indicator of reset completion. If we don't
3462 * wait for the reset to fully complete, trying to read
3463 * from the device's non-PCI registers may yield garbage
3464 * results.
3465 */
3466 for (i = 0; i < BGE_TIMEOUT; i++) {
3467 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3468 break;
3469 DELAY(10);
3470 }
3471
3472 /* Fix up byte swapping. */
3473 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3474 BGE_MODECTL_BYTESWAP_DATA);
3475
3476 /* Tell the ASF firmware we are up */
3477 if (sc->bge_asf_mode & ASF_STACKUP)
3478 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3479
3480 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3481
3482 /*
3483 * The 5704 in TBI mode apparently needs some special
3484	 * adjustment to ensure the SERDES drive level is set
3485 * to 1.2V.
3486 */
3487 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3488 sc->bge_flags & BGE_FLAG_TBI) {
3489 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3490 val = (val & ~0xFFF) | 0x880;
3491 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3492 }
3493
3494 /* XXX: Broadcom Linux driver. */
3495 if (sc->bge_flags & BGE_FLAG_PCIE &&
3496 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
3497 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3498 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3499 /* Enable Data FIFO protection. */
3500 val = CSR_READ_4(sc, 0x7C00);
3501 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3502 }
3503 DELAY(10000);
3504
3505 return (0);
3506}
3507
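/*
 * bge_rxreuse_std()/bge_rxreuse_jumbo() hand the buffer that is already
 * loaded at slot i back to the chip by rewriting its ring descriptor in
 * place and advancing the corresponding producer index, e.g. when a
 * replacement mbuf could not be allocated in bge_rxeof().
 */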
3508static __inline void
3509bge_rxreuse_std(struct bge_softc *sc, int i)
3510{
3511 struct bge_rx_bd *r;
3512
3513 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3514 r->bge_flags = BGE_RXBDFLAG_END;
3515 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3516 r->bge_idx = i;
3517 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3518}
3519
3520static __inline void
3521bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3522{
3523 struct bge_extrx_bd *r;
3524
3525 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3526 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3527 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3528 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3529 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3530 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3531 r->bge_idx = i;
3532 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3533}
3534
3535/*
3536 * Frame reception handling. This is called if there's a frame
3537 * on the receive return list.
3538 *
3539 * Note: we have to be able to handle two possibilities here:
3540 * 1) the frame is from the jumbo receive ring
3541 * 2) the frame is from the standard receive ring
3542 */
3543
3544static int
3545bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3546{
3547 struct ifnet *ifp;
3548 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3549 uint16_t rx_cons;
3550
3551 rx_cons = sc->bge_rx_saved_considx;
3552
3553 /* Nothing to do. */
3554 if (rx_cons == rx_prod)
3555 return (rx_npkts);
3556
3557 ifp = sc->bge_ifp;
3558
3559 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3560 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3561 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3562 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
2776 /* FALLTHROUGH */
2777 case BGE_ASICREV_BCM5750:
2778 case BGE_ASICREV_BCM5752:
2779 case BGE_ASICREV_BCM5906:
2780 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2781 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
2782 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2783 /* FALLTHROUGH */
2784 case BGE_ASICREV_BCM5705:
2785 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2786 break;
2787 }
2788
2789 /* Set various PHY bug flags. */
2790 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2791 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2792 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2793 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2794 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2795 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2796 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2797 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2798 if (pci_get_subvendor(dev) == DELL_VENDORID)
2799 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2800 if ((BGE_IS_5705_PLUS(sc)) &&
2801 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2802 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
2803 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2804 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2805 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2806 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2807 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2808 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2809 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2810 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2811 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2812 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2813 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2814 } else
2815 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2816 }
2817
2818 /* Identify the chips that use an CPMU. */
2819 if (BGE_IS_5717_PLUS(sc) ||
2820 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2821 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2822 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2823 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2824 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2825 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2826 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2827 else
2828 sc->bge_mi_mode = BGE_MIMODE_BASE;
2829 /* Enable auto polling for BCM570[0-5]. */
2830 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2831 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2832
2833 /*
2834 * All controllers that are not 5755 or higher have 4GB
2835 * boundary DMA bug.
2836 * Whenever an address crosses a multiple of the 4GB boundary
2837 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition
2838 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA
2839 * state machine will lockup and cause the device to hang.
2840 */
2841 if (BGE_IS_5755_PLUS(sc) == 0)
2842 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2843
2844 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2845 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2846 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2847 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2848 sc->bge_flags |= BGE_FLAG_5788;
2849 }
2850
2851 capmask = BMSR_DEFCAPMASK;
2852 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
2853 (misccfg == 0x4000 || misccfg == 0x8000)) ||
2854 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2855 pci_get_vendor(dev) == BCOM_VENDORID &&
2856 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
2857 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
2858 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
2859 (pci_get_vendor(dev) == BCOM_VENDORID &&
2860 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
2861 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
2862 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
2863 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
2864 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2865 /* These chips are 10/100 only. */
2866 capmask &= ~BMSR_EXTSTAT;
2867 }
2868
2869 /*
2870 * Some controllers seem to require a special firmware to use
2871 * TSO. But the firmware is not available to FreeBSD and Linux
2872 * claims that the TSO performed by the firmware is slower than
2873 * hardware based TSO. Moreover the firmware based TSO has one
2874 * known bug which can't handle TSO if ethernet header + IP/TCP
2875 * header is greater than 80 bytes. The workaround for the TSO
2876 * bug exist but it seems it's too expensive than not using
2877 * TSO at all. Some hardwares also have the TSO bug so limit
2878 * the TSO to the controllers that are not affected TSO issues
2879 * (e.g. 5755 or higher).
2880 */
2881 if (BGE_IS_5717_PLUS(sc)) {
2882 /* BCM5717 requires different TSO configuration. */
2883 sc->bge_flags |= BGE_FLAG_TSO3;
2884 } else if (BGE_IS_5755_PLUS(sc)) {
2885 /*
2886 * BCM5754 and BCM5787 shares the same ASIC id so
2887 * explicit device id check is required.
2888 * Due to unknown reason TSO does not work on BCM5755M.
2889 */
2890 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2891 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2892 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
2893 sc->bge_flags |= BGE_FLAG_TSO;
2894 }
2895
2896 /*
2897 * Check if this is a PCI-X or PCI Express device.
2898 */
2899 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
2900 /*
2901 * Found a PCI Express capabilities register, this
2902 * must be a PCI Express device.
2903 */
2904 sc->bge_flags |= BGE_FLAG_PCIE;
2905 sc->bge_expcap = reg;
2906 if (pci_get_max_read_req(dev) != 4096)
2907 pci_set_max_read_req(dev, 4096);
2908 } else {
2909 /*
2910 * Check if the device is in PCI-X Mode.
2911 * (This bit is not valid on PCI Express controllers.)
2912 */
2913 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
2914 sc->bge_pcixcap = reg;
2915 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2916 BGE_PCISTATE_PCI_BUSMODE) == 0)
2917 sc->bge_flags |= BGE_FLAG_PCIX;
2918 }
2919
2920 /*
2921 * The 40bit DMA bug applies to the 5714/5715 controllers and is
2922 * not actually a MAC controller bug but an issue with the embedded
2923 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
2924 */
2925 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2926 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2927 /*
2928 * Allocate the interrupt, using MSI if possible. These devices
2929 * support 8 MSI messages, but only the first one is used in
2930 * normal operation.
2931 */
2932 rid = 0;
2933 if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
2934 sc->bge_msicap = reg;
2935 if (bge_can_use_msi(sc)) {
2936 msicount = pci_msi_count(dev);
2937 if (msicount > 1)
2938 msicount = 1;
2939 } else
2940 msicount = 0;
2941 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2942 rid = 1;
2943 sc->bge_flags |= BGE_FLAG_MSI;
2944 }
2945 }
2946
2947 /*
2948 * All controllers except BCM5700 supports tagged status but
2949 * we use tagged status only for MSI case on BCM5717. Otherwise
2950 * MSI on BCM5717 does not work.
2951 */
2952#ifndef DEVICE_POLLING
2953 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
2954 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
2955#endif
2956
2957 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2958 RF_SHAREABLE | RF_ACTIVE);
2959
2960 if (sc->bge_irq == NULL) {
2961 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2962 error = ENXIO;
2963 goto fail;
2964 }
2965
2966 device_printf(dev,
2967 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2968 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2969 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2970 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2971
2972 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2973
2974 /* Try to reset the chip. */
2975 if (bge_reset(sc)) {
2976 device_printf(sc->bge_dev, "chip reset failed\n");
2977 error = ENXIO;
2978 goto fail;
2979 }
2980
2981 sc->bge_asf_mode = 0;
2982 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2983 == BGE_MAGIC_NUMBER)) {
2984 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2985 & BGE_HWCFG_ASF) {
2986 sc->bge_asf_mode |= ASF_ENABLE;
2987 sc->bge_asf_mode |= ASF_STACKUP;
2988 if (BGE_IS_575X_PLUS(sc))
2989 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2990 }
2991 }
2992
2993 /* Try to reset the chip again the nice way. */
2994 bge_stop_fw(sc);
2995 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2996 if (bge_reset(sc)) {
2997 device_printf(sc->bge_dev, "chip reset failed\n");
2998 error = ENXIO;
2999 goto fail;
3000 }
3001
3002 bge_sig_legacy(sc, BGE_RESET_STOP);
3003 bge_sig_post_reset(sc, BGE_RESET_STOP);
3004
3005 if (bge_chipinit(sc)) {
3006 device_printf(sc->bge_dev, "chip initialization failed\n");
3007 error = ENXIO;
3008 goto fail;
3009 }
3010
3011 error = bge_get_eaddr(sc, eaddr);
3012 if (error) {
3013 device_printf(sc->bge_dev,
3014 "failed to read station address\n");
3015 error = ENXIO;
3016 goto fail;
3017 }
3018
3019 /* 5705 limits RX return ring to 512 entries. */
3020 if (BGE_IS_5717_PLUS(sc))
3021 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3022 else if (BGE_IS_5705_PLUS(sc))
3023 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3024 else
3025 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3026
3027 if (bge_dma_alloc(sc)) {
3028 device_printf(sc->bge_dev,
3029 "failed to allocate DMA resources\n");
3030 error = ENXIO;
3031 goto fail;
3032 }
3033
3034 bge_add_sysctls(sc);
3035
3036 /* Set default tuneable values. */
3037 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3038 sc->bge_rx_coal_ticks = 150;
3039 sc->bge_tx_coal_ticks = 150;
3040 sc->bge_rx_max_coal_bds = 10;
3041 sc->bge_tx_max_coal_bds = 10;
3042
3043 /* Initialize checksum features to use. */
3044 sc->bge_csum_features = BGE_CSUM_FEATURES;
3045 if (sc->bge_forced_udpcsum != 0)
3046 sc->bge_csum_features |= CSUM_UDP;
3047
3048 /* Set up ifnet structure */
3049 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3050 if (ifp == NULL) {
3051 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3052 error = ENXIO;
3053 goto fail;
3054 }
3055 ifp->if_softc = sc;
3056 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3057 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3058 ifp->if_ioctl = bge_ioctl;
3059 ifp->if_start = bge_start;
3060 ifp->if_init = bge_init;
3061 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3062 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3063 IFQ_SET_READY(&ifp->if_snd);
3064 ifp->if_hwassist = sc->bge_csum_features;
3065 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3066 IFCAP_VLAN_MTU;
3067 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3068 ifp->if_hwassist |= CSUM_TSO;
3069 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3070 }
3071#ifdef IFCAP_VLAN_HWCSUM
3072 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3073#endif
3074 ifp->if_capenable = ifp->if_capabilities;
3075#ifdef DEVICE_POLLING
3076 ifp->if_capabilities |= IFCAP_POLLING;
3077#endif
3078
3079 /*
3080 * 5700 B0 chips do not support checksumming correctly due
3081 * to hardware bugs.
3082 */
3083 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3084 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3085 ifp->if_capenable &= ~IFCAP_HWCSUM;
3086 ifp->if_hwassist = 0;
3087 }
3088
3089 /*
3090 * Figure out what sort of media we have by checking the
3091 * hardware config word in the first 32k of NIC internal memory,
3092 * or fall back to examining the EEPROM if necessary.
3093 * Note: on some BCM5700 cards, this value appears to be unset.
3094 * If that's the case, we have to rely on identifying the NIC
3095 * by its PCI subsystem ID, as we do below for the SysKonnect
3096 * SK-9D41.
3097 */
3098 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
3099 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
3100 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3101 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3102 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3103 sizeof(hwcfg))) {
3104 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3105 error = ENXIO;
3106 goto fail;
3107 }
3108 hwcfg = ntohl(hwcfg);
3109 }
3110
3111 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3112 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3113 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3114 if (BGE_IS_5714_FAMILY(sc))
3115 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3116 else
3117 sc->bge_flags |= BGE_FLAG_TBI;
3118 }
3119
3120 if (sc->bge_flags & BGE_FLAG_TBI) {
3121 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3122 bge_ifmedia_sts);
3123 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3124 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3125 0, NULL);
3126 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3127 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3128 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3129 } else {
3130 /*
3131 * Do transceiver setup and tell the firmware the
3132 * driver is down so we can try to get access the
3133 * probe if ASF is running. Retry a couple of times
3134 * if we get a conflict with the ASF firmware accessing
3135 * the PHY.
3136 */
3137 trys = 0;
3138 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3139again:
3140 bge_asf_driver_up(sc);
3141
3142 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3143 bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3144 MIIF_DOPAUSE);
3145 if (error != 0) {
3146 if (trys++ < 4) {
3147 device_printf(sc->bge_dev, "Try again\n");
3148 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3149 BMCR_RESET);
3150 goto again;
3151 }
3152 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3153 goto fail;
3154 }
3155
3156 /*
3157 * Now tell the firmware we are going up after probing the PHY
3158 */
3159 if (sc->bge_asf_mode & ASF_STACKUP)
3160 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3161 }
3162
3163 /*
3164 * When using the BCM5701 in PCI-X mode, data corruption has
3165 * been observed in the first few bytes of some received packets.
3166 * Aligning the packet buffer in memory eliminates the corruption.
3167 * Unfortunately, this misaligns the packet payloads. On platforms
3168 * which do not support unaligned accesses, we will realign the
3169 * payloads by copying the received packets.
3170 */
3171 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3172 sc->bge_flags & BGE_FLAG_PCIX)
3173 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3174
3175 /*
3176 * Call MI attach routine.
3177 */
3178 ether_ifattach(ifp, eaddr);
3179 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3180
3181 /* Tell upper layer we support long frames. */
3182 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3183
3184 /*
3185 * Hookup IRQ last.
3186 */
3187 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3188 /* Take advantage of single-shot MSI. */
3189 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3190 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3191 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3192 taskqueue_thread_enqueue, &sc->bge_tq);
3193 if (sc->bge_tq == NULL) {
3194 device_printf(dev, "could not create taskqueue.\n");
3195 ether_ifdetach(ifp);
3196 error = ENXIO;
3197 goto fail;
3198 }
3199 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3200 device_get_nameunit(sc->bge_dev));
3201 error = bus_setup_intr(dev, sc->bge_irq,
3202 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3203 &sc->bge_intrhand);
3204 if (error)
3205 ether_ifdetach(ifp);
3206 } else
3207 error = bus_setup_intr(dev, sc->bge_irq,
3208 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3209 &sc->bge_intrhand);
3210
3211 if (error) {
3212 bge_detach(dev);
3213 device_printf(sc->bge_dev, "couldn't set up irq\n");
3214 }
3215
3216 return (0);
3217
3218fail:
3219 bge_release_resources(sc);
3220
3221 return (error);
3222}
3223
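/*
 * Device detach: stop and reset the hardware, detach the ifnet and the
 * MII bus child, and release the resources acquired in bge_attach().
 */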
3224static int
3225bge_detach(device_t dev)
3226{
3227 struct bge_softc *sc;
3228 struct ifnet *ifp;
3229
3230 sc = device_get_softc(dev);
3231 ifp = sc->bge_ifp;
3232
3233#ifdef DEVICE_POLLING
3234 if (ifp->if_capenable & IFCAP_POLLING)
3235 ether_poll_deregister(ifp);
3236#endif
3237
3238 BGE_LOCK(sc);
3239 bge_stop(sc);
3240 bge_reset(sc);
3241 BGE_UNLOCK(sc);
3242
3243 callout_drain(&sc->bge_stat_ch);
3244
3245 if (sc->bge_tq)
3246 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3247 ether_ifdetach(ifp);
3248
3249 if (sc->bge_flags & BGE_FLAG_TBI) {
3250 ifmedia_removeall(&sc->bge_ifmedia);
3251 } else {
3252 bus_generic_detach(dev);
3253 device_delete_child(dev, sc->bge_miibus);
3254 }
3255
3256 bge_release_resources(sc);
3257
3258 return (0);
3259}
3260
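/*
 * Release the interrupt handler, IRQ/MSI and memory resources, the ifnet,
 * DMA memory and the mutex. All checks tolerate a partially completed
 * attach, so this can be used from the attach failure path as well.
 */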
3261static void
3262bge_release_resources(struct bge_softc *sc)
3263{
3264 device_t dev;
3265
3266 dev = sc->bge_dev;
3267
3268 if (sc->bge_tq != NULL)
3269 taskqueue_free(sc->bge_tq);
3270
3271 if (sc->bge_intrhand != NULL)
3272 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3273
3274 if (sc->bge_irq != NULL)
3275 bus_release_resource(dev, SYS_RES_IRQ,
3276 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3277
3278 if (sc->bge_flags & BGE_FLAG_MSI)
3279 pci_release_msi(dev);
3280
3281 if (sc->bge_res != NULL)
3282 bus_release_resource(dev, SYS_RES_MEMORY,
3283 PCIR_BAR(0), sc->bge_res);
3284
3285 if (sc->bge_ifp != NULL)
3286 if_free(sc->bge_ifp);
3287
3288 bge_dma_free(sc);
3289
3290 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3291 BGE_LOCK_DESTROY(sc);
3292}
3293
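/*
 * Perform a global chip reset, restore the PCI configuration state that
 * the reset clobbers, and wait for the firmware handshake (or the VCPU
 * on BCM5906) to signal that initialization has completed.
 */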
3294static int
3295bge_reset(struct bge_softc *sc)
3296{
3297 device_t dev;
3298 uint32_t cachesize, command, pcistate, reset, val;
3299 void (*write_op)(struct bge_softc *, int, int);
3300 uint16_t devctl;
3301 int i;
3302
3303 dev = sc->bge_dev;
3304
3305 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3306 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3307 if (sc->bge_flags & BGE_FLAG_PCIE)
3308 write_op = bge_writemem_direct;
3309 else
3310 write_op = bge_writemem_ind;
3311 } else
3312 write_op = bge_writereg_ind;
3313
3314 /* Save some important PCI state. */
3315 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3316 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3317 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3318
3319 pci_write_config(dev, BGE_PCI_MISC_CTL,
3320 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3321 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3322
3323 /* Disable fastboot on controllers that support it. */
3324 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3325 BGE_IS_5755_PLUS(sc)) {
3326 if (bootverbose)
3327 device_printf(dev, "Disabling fastboot\n");
3328 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3329 }
3330
3331 /*
3332 * Write the magic number to SRAM at offset 0xB50.
3333 * When firmware finishes its initialization it will
3334 * write ~BGE_MAGIC_NUMBER to the same location.
3335 */
3336 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3337
3338 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3339
3340 /* XXX: Broadcom Linux driver. */
3341 if (sc->bge_flags & BGE_FLAG_PCIE) {
3342 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3343 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3344 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3345 /* Prevent PCIE link training during global reset */
3346 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3347 reset |= 1 << 29;
3348 }
3349 }
3350
3351 /*
3352 * Set GPHY Power Down Override to leave GPHY
3353 * powered up in D0 uninitialized.
3354 */
3355 if (BGE_IS_5705_PLUS(sc))
3356 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3357
3358 /* Issue global reset */
3359 write_op(sc, BGE_MISC_CFG, reset);
3360
3361 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3362 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3363 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3364 val | BGE_VCPU_STATUS_DRV_RESET);
3365 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3366 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3367 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3368 }
3369
3370 DELAY(1000);
3371
3372 /* XXX: Broadcom Linux driver. */
3373 if (sc->bge_flags & BGE_FLAG_PCIE) {
3374 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3375 DELAY(500000); /* wait for link training to complete */
3376 val = pci_read_config(dev, 0xC4, 4);
3377 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3378 }
3379 devctl = pci_read_config(dev,
3380 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3381 /* Clear enable no snoop and disable relaxed ordering. */
3382 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3383 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3384 /* Set PCIE max payload size to 128. */
3385 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3386 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3387 devctl, 2);
3388 /* Clear error status. */
3389 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3390 PCIM_EXP_STA_CORRECTABLE_ERROR |
3391 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3392 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3393 }
3394
3395 /* Reset some of the PCI state that got zapped by reset. */
3396 pci_write_config(dev, BGE_PCI_MISC_CTL,
3397 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3398 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3399 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3400 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3401 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3402 /*
3403 * Disable PCI-X relaxed ordering to ensure status block update
3404 * comes first, then packet buffer DMA. Otherwise the driver may
3405 * read a stale status block.
3406 */
3407 if (sc->bge_flags & BGE_FLAG_PCIX) {
3408 devctl = pci_read_config(dev,
3409 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3410 devctl &= ~PCIXM_COMMAND_ERO;
3411 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3412 devctl &= ~PCIXM_COMMAND_MAX_READ;
3413 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3414 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3415 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3416 PCIXM_COMMAND_MAX_READ);
3417 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3418 }
3419 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3420 devctl, 2);
3421 }
3422 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3423 if (BGE_IS_5714_FAMILY(sc)) {
3424 /* This chip disables MSI on reset. */
3425 if (sc->bge_flags & BGE_FLAG_MSI) {
3426 val = pci_read_config(dev,
3427 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3428 pci_write_config(dev,
3429 sc->bge_msicap + PCIR_MSI_CTRL,
3430 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3431 val = CSR_READ_4(sc, BGE_MSI_MODE);
3432 CSR_WRITE_4(sc, BGE_MSI_MODE,
3433 val | BGE_MSIMODE_ENABLE);
3434 }
3435 val = CSR_READ_4(sc, BGE_MARB_MODE);
3436 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3437 } else
3438 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3439
3440 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3441 for (i = 0; i < BGE_TIMEOUT; i++) {
3442 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3443 if (val & BGE_VCPU_STATUS_INIT_DONE)
3444 break;
3445 DELAY(100);
3446 }
3447 if (i == BGE_TIMEOUT) {
3448 device_printf(dev, "reset timed out\n");
3449 return (1);
3450 }
3451 } else {
3452 /*
3453 * Poll until we see the 1's complement of the magic number.
3454 * This indicates that the firmware initialization is complete.
3455 * We expect this to fail if no chip containing the Ethernet
3456 * address is fitted though.
3457 */
3458 for (i = 0; i < BGE_TIMEOUT; i++) {
3459 DELAY(10);
3460 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3461 if (val == ~BGE_MAGIC_NUMBER)
3462 break;
3463 }
3464
3465 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3466 device_printf(dev,
3467 "firmware handshake timed out, found 0x%08x\n",
3468 val);
3469 }
3470
3471 /*
3472 * XXX Wait for the value of the PCISTATE register to
3473 * return to its original pre-reset state. This is a
3474 * fairly good indicator of reset completion. If we don't
3475 * wait for the reset to fully complete, trying to read
3476 * from the device's non-PCI registers may yield garbage
3477 * results.
3478 */
3479 for (i = 0; i < BGE_TIMEOUT; i++) {
3480 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3481 break;
3482 DELAY(10);
3483 }
3484
3485 /* Fix up byte swapping. */
3486 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3487 BGE_MODECTL_BYTESWAP_DATA);
3488
3489 /* Tell the ASF firmware we are up */
3490 if (sc->bge_asf_mode & ASF_STACKUP)
3491 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3492
3493 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3494
3495 /*
3496 * The 5704 in TBI mode apparently needs some special
3497 * adjustment to ensure the SERDES drive level is set
3498 * to 1.2V.
3499 */
3500 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3501 sc->bge_flags & BGE_FLAG_TBI) {
3502 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3503 val = (val & ~0xFFF) | 0x880;
3504 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3505 }
3506
3507 /* XXX: Broadcom Linux driver. */
3508 if (sc->bge_flags & BGE_FLAG_PCIE &&
3509 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
3510 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3511 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3512 /* Enable Data FIFO protection. */
3513 val = CSR_READ_4(sc, 0x7C00);
3514 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3515 }
3516 DELAY(10000);
3517
3518 return (0);
3519}
3520
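/*
 * Recycle a standard ring RX buffer: re-initialize its descriptor and
 * hand it back to the ring without allocating a new mbuf. Used when an
 * RX error occurs or a replacement mbuf cannot be allocated.
 */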
3521static __inline void
3522bge_rxreuse_std(struct bge_softc *sc, int i)
3523{
3524 struct bge_rx_bd *r;
3525
3526 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3527 r->bge_flags = BGE_RXBDFLAG_END;
3528 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3529 r->bge_idx = i;
3530 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3531}
3532
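/*
 * Recycle a jumbo ring RX buffer: re-initialize its multi-segment
 * descriptor and hand it back to the jumbo ring without allocating a
 * new mbuf.
 */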
3533static __inline void
3534bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3535{
3536 struct bge_extrx_bd *r;
3537
3538 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3539 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3540 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3541 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3542 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3543 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3544 r->bge_idx = i;
3545 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3546}
3547
3548/*
3549 * Frame reception handling. This is called if there's a frame
3550 * on the receive return list.
3551 *
3552 * Note: we have to be able to handle two possibilities here:
3553 * 1) the frame is from the jumbo receive ring
3554 * 2) the frame is from the standard receive ring
3555 */
3556
3557static int
3558bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3559{
3560 struct ifnet *ifp;
3561 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3562 uint16_t rx_cons;
3563
3564 rx_cons = sc->bge_rx_saved_considx;
3565
3566 /* Nothing to do. */
3567 if (rx_cons == rx_prod)
3568 return (rx_npkts);
3569
3570 ifp = sc->bge_ifp;
3571
3572 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3573 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3574 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3575 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3576 if (BGE_IS_JUMBO_CAPABLE(sc) &&
3577 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3564 (MCLBYTES - ETHER_ALIGN))
3565 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3566 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3567
3568 while (rx_cons != rx_prod) {
3569 struct bge_rx_bd *cur_rx;
3570 uint32_t rxidx;
3571 struct mbuf *m = NULL;
3572 uint16_t vlan_tag = 0;
3573 int have_tag = 0;
3574
3575#ifdef DEVICE_POLLING
3576 if (ifp->if_capenable & IFCAP_POLLING) {
3577 if (sc->rxcycles <= 0)
3578 break;
3579 sc->rxcycles--;
3580 }
3581#endif
3582
3583 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3584
3585 rxidx = cur_rx->bge_idx;
3586 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3587
3588 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3589 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3590 have_tag = 1;
3591 vlan_tag = cur_rx->bge_vlan_tag;
3592 }
3593
3594 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3595 jumbocnt++;
3596 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3597 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3598 bge_rxreuse_jumbo(sc, rxidx);
3599 continue;
3600 }
3601 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3602 bge_rxreuse_jumbo(sc, rxidx);
3603 ifp->if_iqdrops++;
3604 continue;
3605 }
3606 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3607 } else {
3608 stdcnt++;
3609 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3610 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3611 bge_rxreuse_std(sc, rxidx);
3612 continue;
3613 }
3614 if (bge_newbuf_std(sc, rxidx) != 0) {
3615 bge_rxreuse_std(sc, rxidx);
3616 ifp->if_iqdrops++;
3617 continue;
3618 }
3619 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3620 }
3621
3622 ifp->if_ipackets++;
3623#ifndef __NO_STRICT_ALIGNMENT
3624 /*
3625 * For architectures with strict alignment we must make sure
3626 * the payload is aligned.
3627 */
3628 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3629 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3630 cur_rx->bge_len);
3631 m->m_data += ETHER_ALIGN;
3632 }
3633#endif
3634 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3635 m->m_pkthdr.rcvif = ifp;
3636
3637 if (ifp->if_capenable & IFCAP_RXCSUM)
3638 bge_rxcsum(sc, cur_rx, m);
3639
3640 /*
3641 * If we received a packet with a vlan tag,
3642 * attach that information to the packet.
3643 */
3644 if (have_tag) {
3645 m->m_pkthdr.ether_vtag = vlan_tag;
3646 m->m_flags |= M_VLANTAG;
3647 }
3648
3649 if (holdlck != 0) {
3650 BGE_UNLOCK(sc);
3651 (*ifp->if_input)(ifp, m);
3652 BGE_LOCK(sc);
3653 } else
3654 (*ifp->if_input)(ifp, m);
3655 rx_npkts++;
3656
3657 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3658 return (rx_npkts);
3659 }
3660
3661 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3662 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3663 if (stdcnt > 0)
3664 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3665 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3666
3667 if (jumbocnt > 0)
3668 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3669 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3670
3671 sc->bge_rx_saved_considx = rx_cons;
3672 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3673 if (stdcnt)
3674 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3675 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3676 if (jumbocnt)
3677 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3678 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3679#ifdef notyet
3680 /*
3681 * This register wraps very quickly under heavy packet drops.
3682 * If you need correct statistics, you can enable this check.
3683 */
3684 if (BGE_IS_5705_PLUS(sc))
3685 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3686#endif
3687 return (rx_npkts);
3688}
3689
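/*
 * Translate the checksum information in an RX descriptor into mbuf
 * checksum flags, taking the different descriptor layouts of 5717 and
 * newer controllers versus older ones into account.
 */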
3690static void
3691bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3692{
3693
3694 if (BGE_IS_5717_PLUS(sc)) {
3695 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3696 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3697 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3698 if ((cur_rx->bge_error_flag &
3699 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3700 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3701 }
3702 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3703 m->m_pkthdr.csum_data =
3704 cur_rx->bge_tcp_udp_csum;
3705 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3706 CSUM_PSEUDO_HDR;
3707 }
3708 }
3709 } else {
3710 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3711 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3712 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3713 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3714 }
3715 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3716 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3717 m->m_pkthdr.csum_data =
3718 cur_rx->bge_tcp_udp_csum;
3719 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3720 CSUM_PSEUDO_HDR;
3721 }
3722 }
3723}
3724
3725static void
3726bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3727{
3728 struct bge_tx_bd *cur_tx;
3729 struct ifnet *ifp;
3730
3731 BGE_LOCK_ASSERT(sc);
3732
3733 /* Nothing to do. */
3734 if (sc->bge_tx_saved_considx == tx_cons)
3735 return;
3736
3737 ifp = sc->bge_ifp;
3738
3739 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3740 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3741 /*
3742 * Go through our tx ring and free mbufs for those
3743 * frames that have been sent.
3744 */
3745 while (sc->bge_tx_saved_considx != tx_cons) {
3746 uint32_t idx;
3747
3748 idx = sc->bge_tx_saved_considx;
3749 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3750 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3751 ifp->if_opackets++;
3752 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3753 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3754 sc->bge_cdata.bge_tx_dmamap[idx],
3755 BUS_DMASYNC_POSTWRITE);
3756 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3757 sc->bge_cdata.bge_tx_dmamap[idx]);
3758 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3759 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3760 }
3761 sc->bge_txcnt--;
3762 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3763 }
3764
3765 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3766 if (sc->bge_txcnt == 0)
3767 sc->bge_timer = 0;
3768}
3769
3770#ifdef DEVICE_POLLING
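/*
 * DEVICE_POLLING entry point: process up to 'count' received frames and
 * any TX completions from the status block without relying on interrupts.
 */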
3771static int
3772bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3773{
3774 struct bge_softc *sc = ifp->if_softc;
3775 uint16_t rx_prod, tx_cons;
3776 uint32_t statusword;
3777 int rx_npkts = 0;
3778
3779 BGE_LOCK(sc);
3780 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3781 BGE_UNLOCK(sc);
3782 return (rx_npkts);
3783 }
3784
3785 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3786 sc->bge_cdata.bge_status_map,
3787 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3788 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3789 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3790
3791 statusword = sc->bge_ldata.bge_status_block->bge_status;
3792 sc->bge_ldata.bge_status_block->bge_status = 0;
3793
3794 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3795 sc->bge_cdata.bge_status_map,
3796 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3797
3798 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3799 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3800 sc->bge_link_evt++;
3801
3802 if (cmd == POLL_AND_CHECK_STATUS)
3803 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3804 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3805 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3806 bge_link_upd(sc);
3807
3808 sc->rxcycles = count;
3809 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3810 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3811 BGE_UNLOCK(sc);
3812 return (rx_npkts);
3813 }
3814 bge_txeof(sc, tx_cons);
3815 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3816 bge_start_locked(ifp);
3817
3818 BGE_UNLOCK(sc);
3819 return (rx_npkts);
3820}
3821#endif /* DEVICE_POLLING */
3822
3823static int
3824bge_msi_intr(void *arg)
3825{
3826 struct bge_softc *sc;
3827
3828 sc = (struct bge_softc *)arg;
3829 /*
3830 * This interrupt is not shared, and the controller has already
3831 * disabled further interrupts.
3832 */
3833 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3834 return (FILTER_HANDLED);
3835}
3836
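/*
 * Deferred interrupt processing used with single-shot MSI; interrupts
 * stay disabled until the saved status tag is written back to the
 * IRQ 0 mailbox.
 */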
3837static void
3838bge_intr_task(void *arg, int pending)
3839{
3840 struct bge_softc *sc;
3841 struct ifnet *ifp;
3842 uint32_t status, status_tag;
3843 uint16_t rx_prod, tx_cons;
3844
3845 sc = (struct bge_softc *)arg;
3846 ifp = sc->bge_ifp;
3847
3848 BGE_LOCK(sc);
3849 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3850 BGE_UNLOCK(sc);
3851 return;
3852 }
3853
3854 /* Get updated status block. */
3855 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3856 sc->bge_cdata.bge_status_map,
3857 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3858
3859 /* Save producer/consumer indexes. */
3860 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3861 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3862 status = sc->bge_ldata.bge_status_block->bge_status;
3863 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
3864 sc->bge_ldata.bge_status_block->bge_status = 0;
3865 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3866 sc->bge_cdata.bge_status_map,
3867 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3868 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
3869 status_tag = 0;
3870
3871 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
3872 bge_link_upd(sc);
3873
3874 /* Let controller work. */
3875 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
3876
3877 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3878 sc->bge_rx_saved_considx != rx_prod) {
3879 /* Check RX return ring producer/consumer. */
3880 BGE_UNLOCK(sc);
3881 bge_rxeof(sc, rx_prod, 0);
3882 BGE_LOCK(sc);
3883 }
3884 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3885 /* Check TX ring producer/consumer. */
3886 bge_txeof(sc, tx_cons);
3887 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3888 bge_start_locked(ifp);
3889 }
3890 BGE_UNLOCK(sc);
3891}
3892
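/*
 * Interrupt handler used when the driver is not using the single-shot
 * MSI/taskqueue path set up in bge_attach().
 */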
3893static void
3894bge_intr(void *xsc)
3895{
3896 struct bge_softc *sc;
3897 struct ifnet *ifp;
3898 uint32_t statusword;
3899 uint16_t rx_prod, tx_cons;
3900
3901 sc = xsc;
3902
3903 BGE_LOCK(sc);
3904
3905 ifp = sc->bge_ifp;
3906
3907#ifdef DEVICE_POLLING
3908 if (ifp->if_capenable & IFCAP_POLLING) {
3909 BGE_UNLOCK(sc);
3910 return;
3911 }
3912#endif
3913
3914 /*
3915 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3916 * disable interrupts by writing nonzero like we used to, since with
3917 * our current organization this just gives complications and
3918 * pessimizations for re-enabling interrupts. We used to have races
3919 * instead of the necessary complications. Disabling interrupts
3920 * would just reduce the chance of a status update while we are
3921 * running (by switching to the interrupt-mode coalescence
3922 * parameters), but this chance is already very low so it is more
3923 * efficient to get another interrupt than prevent it.
3924 *
3925 * We do the ack first to ensure another interrupt if there is a
3926 * status update after the ack. We don't check for the status
3927 * changing later because it is more efficient to get another
3928 * interrupt than prevent it, not quite as above (not checking is
3929 * a smaller optimization than not toggling the interrupt enable,
3930 * since checking doesn't involve PCI accesses and toggling requires
3931 * the status check). So toggling would probably be a pessimization
3932 * even with MSI. It would only be needed for using a task queue.
3933 */
3934 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3935
3936 /*
3937 * Do the mandatory PCI flush as well as get the link status.
3938 */
3939 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3940
3941 /* Make sure the descriptor ring indexes are coherent. */
3942 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3943 sc->bge_cdata.bge_status_map,
3944 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3945 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3946 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3947 sc->bge_ldata.bge_status_block->bge_status = 0;
3948 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3949 sc->bge_cdata.bge_status_map,
3950 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3951
3952 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3953 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3954 statusword || sc->bge_link_evt)
3955 bge_link_upd(sc);
3956
3957 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3958 /* Check RX return ring producer/consumer. */
3959 bge_rxeof(sc, rx_prod, 1);
3960 }
3961
3962 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3963 /* Check TX ring producer/consumer. */
3964 bge_txeof(sc, tx_cons);
3965 }
3966
3967 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3968 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3969 bge_start_locked(ifp);
3970
3971 BGE_UNLOCK(sc);
3972}
3973
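/*
 * Send a keep-alive event to the ASF firmware roughly every two seconds
 * so it knows the driver is still running.
 */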
3974static void
3975bge_asf_driver_up(struct bge_softc *sc)
3976{
3977 if (sc->bge_asf_mode & ASF_STACKUP) {
3978 /* Send ASF heartbeat approx. every 2s. */
3979 if (sc->bge_asf_count)
3980 sc->bge_asf_count --;
3981 else {
3982 sc->bge_asf_count = 2;
3983 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3984 BGE_FW_DRV_ALIVE);
3985 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3986 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3987 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3988 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3989 }
3990 }
3991}
3992
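/*
 * Periodic callout (once per second): update statistics, drive the
 * PHY/TBI link state, keep the ASF heartbeat alive and run the watchdog.
 */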
3993static void
3994bge_tick(void *xsc)
3995{
3996 struct bge_softc *sc = xsc;
3997 struct mii_data *mii = NULL;
3998
3999 BGE_LOCK_ASSERT(sc);
4000
4001 /* Synchronize with possible callout reset/stop. */
4002 if (callout_pending(&sc->bge_stat_ch) ||
4003 !callout_active(&sc->bge_stat_ch))
4004 return;
4005
4006 if (BGE_IS_5705_PLUS(sc))
4007 bge_stats_update_regs(sc);
4008 else
4009 bge_stats_update(sc);
4010
4011 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4012 mii = device_get_softc(sc->bge_miibus);
4013 /*
4014 * Do not touch PHY if we have link up. This could break
4015 * IPMI/ASF mode or produce extra input errors
4016 * (extra errors were reported for bcm5701 & bcm5704).
4017 */
4018 if (!sc->bge_link)
4019 mii_tick(mii);
4020 } else {
4021 /*
4022 * Since auto-polling can't be used in TBI mode, we poll the
4023 * link status manually. Here we register a pending link event
4024 * and trigger an interrupt.
4025 */
4026#ifdef DEVICE_POLLING
4027 /* In polling mode we poll link state in bge_poll(). */
4028 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4029#endif
4030 {
4031 sc->bge_link_evt++;
4032 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4033 sc->bge_flags & BGE_FLAG_5788)
4034 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4035 else
4036 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4037 }
4038 }
4039
4040 bge_asf_driver_up(sc);
4041 bge_watchdog(sc);
4042
4043 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4044}
4045
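/*
 * Update interface statistics from the MAC statistics registers; used
 * on 5705 and newer controllers (see bge_tick()).
 */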
4046static void
4047bge_stats_update_regs(struct bge_softc *sc)
4048{
4049 struct ifnet *ifp;
4050 struct bge_mac_stats *stats;
4051
4052 ifp = sc->bge_ifp;
4053 stats = &sc->bge_mac_stats;
4054
4055 stats->ifHCOutOctets +=
4056 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4057 stats->etherStatsCollisions +=
4058 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4059 stats->outXonSent +=
4060 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4061 stats->outXoffSent +=
4062 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4063 stats->dot3StatsInternalMacTransmitErrors +=
4064 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4065 stats->dot3StatsSingleCollisionFrames +=
4066 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4067 stats->dot3StatsMultipleCollisionFrames +=
4068 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4069 stats->dot3StatsDeferredTransmissions +=
4070 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4071 stats->dot3StatsExcessiveCollisions +=
4072 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4073 stats->dot3StatsLateCollisions +=
4074 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4075 stats->ifHCOutUcastPkts +=
4076 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4077 stats->ifHCOutMulticastPkts +=
4078 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4079 stats->ifHCOutBroadcastPkts +=
4080 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4081
4082 stats->ifHCInOctets +=
4083 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4084 stats->etherStatsFragments +=
4085 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4086 stats->ifHCInUcastPkts +=
4087 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4088 stats->ifHCInMulticastPkts +=
4089 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4090 stats->ifHCInBroadcastPkts +=
4091 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4092 stats->dot3StatsFCSErrors +=
4093 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4094 stats->dot3StatsAlignmentErrors +=
4095 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4096 stats->xonPauseFramesReceived +=
4097 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4098 stats->xoffPauseFramesReceived +=
4099 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4100 stats->macControlFramesReceived +=
4101 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4102 stats->xoffStateEntered +=
4103 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4104 stats->dot3StatsFramesTooLong +=
4105 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4106 stats->etherStatsJabbers +=
4107 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4108 stats->etherStatsUndersizePkts +=
4109 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4110
4111 stats->FramesDroppedDueToFilters +=
4112 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4113 stats->DmaWriteQueueFull +=
4114 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4115 stats->DmaWriteHighPriQueueFull +=
4116 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4117 stats->NoMoreRxBDs +=
4118 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4119 stats->InputDiscards +=
4120 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4121 stats->InputErrors +=
4122 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4123 stats->RecvThresholdHit +=
4124 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4125
4126 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4127 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4128 stats->InputErrors);
4129}
4130
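/*
 * Clear the MAC statistics counters by reading and discarding each
 * statistics register.
 */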
4131static void
4132bge_stats_clear_regs(struct bge_softc *sc)
4133{
4134
4135 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4136 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4137 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4138 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4139 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4140 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4141 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4142 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4143 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4144 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4145 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4146 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4147 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4148
4149 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4150 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4151 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4152 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4153 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4154 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4155 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4156 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4157 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4158 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4159 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4160 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4161 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4162 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4163
4164 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4165 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4166 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4167 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4168 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4169 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4170 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4171}
4172
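/*
 * Update interface statistics from the statistics block kept in NIC
 * memory, on controllers not handled by bge_stats_update_regs().
 */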
4173static void
4174bge_stats_update(struct bge_softc *sc)
4175{
4176 struct ifnet *ifp;
4177 bus_size_t stats;
4178 uint32_t cnt; /* current register value */
4179
4180 ifp = sc->bge_ifp;
4181
4182 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4183
4184#define READ_STAT(sc, stats, stat) \
4185 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4186
4187 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4188 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4189 sc->bge_tx_collisions = cnt;
4190
4191 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4192 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4193 sc->bge_rx_discards = cnt;
4194
4195 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4196 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4197 sc->bge_tx_discards = cnt;
4198
4199#undef READ_STAT
4200}
4201
4202/*
4203 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4204 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4205 * but when such padded frames employ the bge IP/TCP checksum offload,
4206 * the hardware checksum assist gives incorrect results (possibly
4207 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4208 * If we pad such runts with zeros, the onboard checksum comes out correct.
4209 */
4210static __inline int
4211bge_cksum_pad(struct mbuf *m)
4212{
4213 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4214 struct mbuf *last;
4215
4216 /* If there's only the packet-header and we can pad there, use it. */
4217 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4218 M_TRAILINGSPACE(m) >= padlen) {
4219 last = m;
4220 } else {
4221 /*
4222 * Walk packet chain to find last mbuf. We will either
4223 * pad there, or append a new mbuf and pad it.
4224 */
4225 for (last = m; last->m_next != NULL; last = last->m_next);
4226 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4227 /* Allocate new empty mbuf, pad it. Compact later. */
4228 struct mbuf *n;
4229
4230 MGET(n, M_DONTWAIT, MT_DATA);
4231 if (n == NULL)
4232 return (ENOBUFS);
4233 n->m_len = 0;
4234 last->m_next = n;
4235 last = n;
4236 }
4237 }
4238
4239 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4240 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4241 last->m_len += padlen;
4242 m->m_pkthdr.len += padlen;
4243
4244 return (0);
4245}
4246
4247static struct mbuf *
4248bge_check_short_dma(struct mbuf *m)
4249{
4250 struct mbuf *n;
4251 int found;
4252
4253 /*
3254 * If the device receives two back-to-back send BDs with less than
3255 * or equal to 8 total bytes then the device may hang. The two
3256 * back-to-back send BDs must be in the same frame for this failure
3257 * to occur. Scan the mbuf chain and see whether two back-to-back
3258 * send BDs are present. If so, allocate a new mbuf and copy the
3259 * frame to work around the silicon bug.
4260 */
4261 for (n = m, found = 0; n != NULL; n = n->m_next) {
4262 if (n->m_len < 8) {
4263 found++;
4264 if (found > 1)
4265 break;
4266 continue;
4267 }
4268 found = 0;
4269 }
4270
4271 if (found > 1) {
4272 n = m_defrag(m, M_DONTWAIT);
4273 if (n == NULL)
4274 m_freem(m);
4275 } else
4276 n = m;
4277 return (n);
4278}
4279
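/*
 * Prepare an mbuf chain for hardware TSO: make the headers contiguous
 * and writable, zero the IP checksum and TCP pseudo checksum, and encode
 * the MSS and header length in the format expected by the controller.
 */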
4280static struct mbuf *
4281bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4282 uint16_t *flags)
4283{
4284 struct ip *ip;
4285 struct tcphdr *tcp;
4286 struct mbuf *n;
4287 uint16_t hlen;
4288 uint32_t poff;
4289
4290 if (M_WRITABLE(m) == 0) {
4291 /* Get a writable copy. */
4292 n = m_dup(m, M_DONTWAIT);
4293 m_freem(m);
4294 if (n == NULL)
4295 return (NULL);
4296 m = n;
4297 }
4298 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4299 if (m == NULL)
4300 return (NULL);
4301 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4302 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4303 m = m_pullup(m, poff + sizeof(struct tcphdr));
4304 if (m == NULL)
4305 return (NULL);
4306 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4307 m = m_pullup(m, poff + (tcp->th_off << 2));
4308 if (m == NULL)
4309 return (NULL);
4310 /*
4311 * It seems the controller doesn't modify the IP length and TCP pseudo
4312 * checksum, so the values computed by the upper stack must be 0.
4313 */
4314 *mss = m->m_pkthdr.tso_segsz;
4315 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4316 ip->ip_sum = 0;
4317 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4318 /* Clear pseudo checksum computed by TCP stack. */
4319 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4320 tcp->th_sum = 0;
4321 /*
4322 * Broadcom controllers use a different descriptor format for
4323 * TSO depending on the ASIC revision. Due to the TSO-capable
4324 * firmware license issue and the lower performance of firmware
4325 * based TSO we only support hardware based TSO.
4326 */
4327 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4328 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4329 if (sc->bge_flags & BGE_FLAG_TSO3) {
4330 /*
4331 * For BCM5717 and newer controllers, hardware based TSO
4332 * uses the 14 lower bits of the bge_mss field to store the
4333 * MSS and the upper 2 bits to store the lowest 2 bits of
4334 * the IP/TCP header length. The upper 6 bits of the header
4335 * length are stored in the bge_flags[14:10,4] field. Jumbo
4336 * frames are supported.
4337 */
4338 *mss |= ((hlen & 0x3) << 14);
4339 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
4340 } else {
4341 /*
4342 * For BCM5755 and newer controllers, hardware based TSO uses
4343 * the lower 11 bits to store the MSS and the upper 5 bits to
4344 * store the IP/TCP header length. Jumbo frames are not
4345 * supported.
4346 */
4347 *mss |= (hlen << 11);
4348 }
4349 return (m);
4350}
4351
4352/*
4353 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4354 * pointers to descriptors.
4355 */
4356static int
4357bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4358{
4359 bus_dma_segment_t segs[BGE_NSEG_NEW];
4360 bus_dmamap_t map;
4361 struct bge_tx_bd *d;
4362 struct mbuf *m = *m_head;
4363 uint32_t idx = *txidx;
4364 uint16_t csum_flags, mss, vlan_tag;
4365 int nsegs, i, error;
4366
4367 csum_flags = 0;
4368 mss = 0;
4369 vlan_tag = 0;
4370 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4371 m->m_next != NULL) {
4372 *m_head = bge_check_short_dma(m);
4373 if (*m_head == NULL)
4374 return (ENOBUFS);
4375 m = *m_head;
4376 }
4377 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4378 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4379 if (*m_head == NULL)
4380 return (ENOBUFS);
4381 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4382 BGE_TXBDFLAG_CPU_POST_DMA;
4383 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4384 if (m->m_pkthdr.csum_flags & CSUM_IP)
4385 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4386 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4387 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4388 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4389 (error = bge_cksum_pad(m)) != 0) {
4390 m_freem(m);
4391 *m_head = NULL;
4392 return (error);
4393 }
4394 }
4395 if (m->m_flags & M_LASTFRAG)
4396 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4397 else if (m->m_flags & M_FRAG)
4398 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4399 }
4400
4401 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4402 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4403 m->m_pkthdr.len > ETHER_MAX_LEN)
4404 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4405 if (sc->bge_forced_collapse > 0 &&
4406 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4407 /*
4408 * Forcibly collapse mbuf chains to overcome a hardware
4409 * limitation which only supports a single outstanding
4410 * DMA read operation.
4411 */
4412 if (sc->bge_forced_collapse == 1)
4413 m = m_defrag(m, M_DONTWAIT);
4414 else
4415 m = m_collapse(m, M_DONTWAIT,
4416 sc->bge_forced_collapse);
4417 if (m == NULL)
4418 m = *m_head;
4419 *m_head = m;
4420 }
4421 }
4422
4423 map = sc->bge_cdata.bge_tx_dmamap[idx];
4424 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4425 &nsegs, BUS_DMA_NOWAIT);
4426 if (error == EFBIG) {
4427 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4428 if (m == NULL) {
4429 m_freem(*m_head);
4430 *m_head = NULL;
4431 return (ENOBUFS);
4432 }
4433 *m_head = m;
4434 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4435 m, segs, &nsegs, BUS_DMA_NOWAIT);
4436 if (error) {
4437 m_freem(m);
4438 *m_head = NULL;
4439 return (error);
4440 }
4441 } else if (error != 0)
4442 return (error);
4443
4444 /* Check if we have enough free send BDs. */
4445 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4446 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4447 return (ENOBUFS);
4448 }
4449
4450 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4451
4452 if (m->m_flags & M_VLANTAG) {
4453 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4454 vlan_tag = m->m_pkthdr.ether_vtag;
4455 }
4456 for (i = 0; ; i++) {
4457 d = &sc->bge_ldata.bge_tx_ring[idx];
4458 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4459 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4460 d->bge_len = segs[i].ds_len;
4461 d->bge_flags = csum_flags;
4462 d->bge_vlan_tag = vlan_tag;
4463 d->bge_mss = mss;
4464 if (i == nsegs - 1)
4465 break;
4466 BGE_INC(idx, BGE_TX_RING_CNT);
4467 }
4468
4469 /* Mark the last segment as end of packet... */
4470 d->bge_flags |= BGE_TXBDFLAG_END;
4471
4472 /*
4473 * Ensure that the map for this transmission
4474 * is placed at the array index of the last descriptor
4475 * in this chain.
4476 */
4477 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4478 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4479 sc->bge_cdata.bge_tx_chain[idx] = m;
4480 sc->bge_txcnt += nsegs;
4481
4482 BGE_INC(idx, BGE_TX_RING_CNT);
4483 *txidx = idx;
4484
4485 return (0);
4486}
4487
4488/*
4489 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4490 * to the mbuf data regions directly in the transmit descriptors.
4491 */
4492static void
4493bge_start_locked(struct ifnet *ifp)
4494{
4495 struct bge_softc *sc;
4496 struct mbuf *m_head;
4497 uint32_t prodidx;
4498 int count;
4499
4500 sc = ifp->if_softc;
4501 BGE_LOCK_ASSERT(sc);
4502
4503 if (!sc->bge_link ||
4504 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4505 IFF_DRV_RUNNING)
4506 return;
4507
4508 prodidx = sc->bge_tx_prodidx;
4509
4510 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4511 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4512 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4513 break;
4514 }
4515 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4516 if (m_head == NULL)
4517 break;
4518
4519 /*
4520 * XXX
4521 * The code inside the if() block is never reached since we
4522 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4523 * requests to checksum TCP/UDP in a fragmented packet.
4524 *
4525 * XXX
4526 * safety overkill. If this is a fragmented packet chain
4527 * with delayed TCP/UDP checksums, then only encapsulate
4528 * it if we have enough descriptors to handle the entire
4529 * chain at once.
4530 * (paranoia -- may not actually be needed)
4531 */
4532 if (m_head->m_flags & M_FIRSTFRAG &&
4533 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4534 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4535 m_head->m_pkthdr.csum_data + 16) {
4536 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4537 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4538 break;
4539 }
4540 }
4541
4542 /*
4543 * Pack the data into the transmit ring. If we
4544 * don't have room, set the OACTIVE flag and wait
4545 * for the NIC to drain the ring.
4546 */
4547 if (bge_encap(sc, &m_head, &prodidx)) {
4548 if (m_head == NULL)
4549 break;
4550 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4551 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4552 break;
4553 }
4554 ++count;
4555
4556 /*
4557 * If there's a BPF listener, bounce a copy of this frame
4558 * to him.
4559 */
4560#ifdef ETHER_BPF_MTAP
4561 ETHER_BPF_MTAP(ifp, m_head);
4562#else
4563 BPF_MTAP(ifp, m_head);
4564#endif
4565 }
4566
4567 if (count > 0) {
4568 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4569 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4570 /* Transmit. */
4571 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4572 /* 5700 b2 errata */
4573 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4574 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4575
4576 sc->bge_tx_prodidx = prodidx;
4577
4578 /*
4579 * Set a timeout in case the chip goes out to lunch.
4580 */
4581 sc->bge_timer = 5;
4582 }
4583}
4584
4585/*
4586 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4587 * to the mbuf data regions directly in the transmit descriptors.
4588 */
4589static void
4590bge_start(struct ifnet *ifp)
4591{
4592 struct bge_softc *sc;
4593
4594 sc = ifp->if_softc;
4595 BGE_LOCK(sc);
4596 bge_start_locked(ifp);
4597 BGE_UNLOCK(sc);
4598}
4599
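/*
 * Initialize the hardware and bring the interface up: reset the chip,
 * program the MAC address and receive filters, and set up the RX/TX
 * rings. Must be called with the bge lock held.
 */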
4600static void
4601bge_init_locked(struct bge_softc *sc)
4602{
4603 struct ifnet *ifp;
4604 uint16_t *m;
4605 uint32_t mode;
4606
4607 BGE_LOCK_ASSERT(sc);
4608
4609 ifp = sc->bge_ifp;
4610
4611 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4612 return;
4613
4614 /* Cancel pending I/O and flush buffers. */
4615 bge_stop(sc);
4616
4617 bge_stop_fw(sc);
4618 bge_sig_pre_reset(sc, BGE_RESET_START);
4619 bge_reset(sc);
4620 bge_sig_legacy(sc, BGE_RESET_START);
4621 bge_sig_post_reset(sc, BGE_RESET_START);
4622
4623 bge_chipinit(sc);
4624
4625 /*
4626 * Init the various state machines, ring
4627 * control blocks and firmware.
4628 */
4629 if (bge_blockinit(sc)) {
4630 device_printf(sc->bge_dev, "initialization failure\n");
4631 return;
4632 }
4633
4634 ifp = sc->bge_ifp;
4635
4636 /* Specify MTU. */
4637 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4638 ETHER_HDR_LEN + ETHER_CRC_LEN +
4639 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4640
4641 /* Load our MAC address. */
4642 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4643 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4644 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4645
4646 /* Program promiscuous mode. */
4647 bge_setpromisc(sc);
4648
4649 /* Program multicast filter. */
4650 bge_setmulti(sc);
4651
4652 /* Program VLAN tag stripping. */
4653 bge_setvlan(sc);
4654
4655 /* Override UDP checksum offloading. */
4656 if (sc->bge_forced_udpcsum == 0)
4657 sc->bge_csum_features &= ~CSUM_UDP;
4658 else
4659 sc->bge_csum_features |= CSUM_UDP;
4660 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4661 ifp->if_capenable & IFCAP_TXCSUM) {
4662 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4663 ifp->if_hwassist |= sc->bge_csum_features;
4664 }
4665
4666 /* Init RX ring. */
4667 if (bge_init_rx_ring_std(sc) != 0) {
4668 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4669 bge_stop(sc);
4670 return;
4671 }
4672
4673 /*
4674 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4675 * memory to ensure that the chip has in fact read the first
4676 * entry of the ring.
4677 */
4678 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4679 uint32_t v, i;
4680 for (i = 0; i < 10; i++) {
4681 DELAY(20);
4682 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4683 if (v == (MCLBYTES - ETHER_ALIGN))
4684 break;
4685 }
4686 if (i == 10)
4687 device_printf (sc->bge_dev,
4688 "5705 A0 chip failed to load RX ring\n");
4689 }
4690
4691 /* Init jumbo RX ring. */
3578 (MCLBYTES - ETHER_ALIGN))
3579 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3580 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3581
3582 while (rx_cons != rx_prod) {
3583 struct bge_rx_bd *cur_rx;
3584 uint32_t rxidx;
3585 struct mbuf *m = NULL;
3586 uint16_t vlan_tag = 0;
3587 int have_tag = 0;
3588
3589#ifdef DEVICE_POLLING
3590 if (ifp->if_capenable & IFCAP_POLLING) {
3591 if (sc->rxcycles <= 0)
3592 break;
3593 sc->rxcycles--;
3594 }
3595#endif
3596
3597 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3598
3599 rxidx = cur_rx->bge_idx;
3600 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3601
3602 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3603 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3604 have_tag = 1;
3605 vlan_tag = cur_rx->bge_vlan_tag;
3606 }
3607
3608 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3609 jumbocnt++;
3610 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3611 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3612 bge_rxreuse_jumbo(sc, rxidx);
3613 continue;
3614 }
3615 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3616 bge_rxreuse_jumbo(sc, rxidx);
3617 ifp->if_iqdrops++;
3618 continue;
3619 }
3620 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3621 } else {
3622 stdcnt++;
3623 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3624 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3625 bge_rxreuse_std(sc, rxidx);
3626 continue;
3627 }
3628 if (bge_newbuf_std(sc, rxidx) != 0) {
3629 bge_rxreuse_std(sc, rxidx);
3630 ifp->if_iqdrops++;
3631 continue;
3632 }
3633 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3634 }
3635
3636 ifp->if_ipackets++;
3637#ifndef __NO_STRICT_ALIGNMENT
3638 /*
3639 * For architectures with strict alignment we must make sure
3640 * the payload is aligned.
3641 */
3642 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3643 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3644 cur_rx->bge_len);
3645 m->m_data += ETHER_ALIGN;
3646 }
3647#endif
3648 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3649 m->m_pkthdr.rcvif = ifp;
3650
3651 if (ifp->if_capenable & IFCAP_RXCSUM)
3652 bge_rxcsum(sc, cur_rx, m);
3653
3654 /*
3655 * If we received a packet with a vlan tag,
3656 * attach that information to the packet.
3657 */
3658 if (have_tag) {
3659 m->m_pkthdr.ether_vtag = vlan_tag;
3660 m->m_flags |= M_VLANTAG;
3661 }
3662
3663 if (holdlck != 0) {
3664 BGE_UNLOCK(sc);
3665 (*ifp->if_input)(ifp, m);
3666 BGE_LOCK(sc);
3667 } else
3668 (*ifp->if_input)(ifp, m);
3669 rx_npkts++;
3670
3671 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3672 return (rx_npkts);
3673 }
3674
3675 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3676 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3677 if (stdcnt > 0)
3678 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3679 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3680
3681 if (jumbocnt > 0)
3682 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3683 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3684
3685 sc->bge_rx_saved_considx = rx_cons;
3686 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3687 if (stdcnt)
3688 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3689 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3690 if (jumbocnt)
3691 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3692 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3693#ifdef notyet
3694 /*
3695 * This register wraps very quickly under heavy packet drops.
3696 * If you need correct statistics, you can enable this check.
3697 */
3698 if (BGE_IS_5705_PLUS(sc))
3699 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3700#endif
3701 return (rx_npkts);
3702}
3703
3704static void
3705bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3706{
3707
3708 if (BGE_IS_5717_PLUS(sc)) {
3709 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3710 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3711 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3712 if ((cur_rx->bge_error_flag &
3713 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3714 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3715 }
3716 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3717 m->m_pkthdr.csum_data =
3718 cur_rx->bge_tcp_udp_csum;
3719 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3720 CSUM_PSEUDO_HDR;
3721 }
3722 }
3723 } else {
3724 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3725 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3726 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3727 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3728 }
3729 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3730 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3731 m->m_pkthdr.csum_data =
3732 cur_rx->bge_tcp_udp_csum;
3733 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3734 CSUM_PSEUDO_HDR;
3735 }
3736 }
3737}
3738
3739static void
3740bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3741{
3742 struct bge_tx_bd *cur_tx;
3743 struct ifnet *ifp;
3744
3745 BGE_LOCK_ASSERT(sc);
3746
3747 /* Nothing to do. */
3748 if (sc->bge_tx_saved_considx == tx_cons)
3749 return;
3750
3751 ifp = sc->bge_ifp;
3752
3753 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3754 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3755 /*
3756 * Go through our tx ring and free mbufs for those
3757 * frames that have been sent.
3758 */
3759 while (sc->bge_tx_saved_considx != tx_cons) {
3760 uint32_t idx;
3761
3762 idx = sc->bge_tx_saved_considx;
3763 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3764 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3765 ifp->if_opackets++;
3766 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3767 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3768 sc->bge_cdata.bge_tx_dmamap[idx],
3769 BUS_DMASYNC_POSTWRITE);
3770 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3771 sc->bge_cdata.bge_tx_dmamap[idx]);
3772 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3773 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3774 }
3775 sc->bge_txcnt--;
3776 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3777 }
3778
3779 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3780 if (sc->bge_txcnt == 0)
3781 sc->bge_timer = 0;
3782}
3783
3784#ifdef DEVICE_POLLING
3785static int
3786bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3787{
3788 struct bge_softc *sc = ifp->if_softc;
3789 uint16_t rx_prod, tx_cons;
3790 uint32_t statusword;
3791 int rx_npkts = 0;
3792
3793 BGE_LOCK(sc);
3794 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3795 BGE_UNLOCK(sc);
3796 return (rx_npkts);
3797 }
3798
3799 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3800 sc->bge_cdata.bge_status_map,
3801 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3802 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3803 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3804
3805 statusword = sc->bge_ldata.bge_status_block->bge_status;
3806 sc->bge_ldata.bge_status_block->bge_status = 0;
3807
3808 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3809 sc->bge_cdata.bge_status_map,
3810 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3811
3812 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3813 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3814 sc->bge_link_evt++;
3815
3816 if (cmd == POLL_AND_CHECK_STATUS)
3817 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3818 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3819 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3820 bge_link_upd(sc);
3821
3822 sc->rxcycles = count;
3823 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3824 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3825 BGE_UNLOCK(sc);
3826 return (rx_npkts);
3827 }
3828 bge_txeof(sc, tx_cons);
3829 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3830 bge_start_locked(ifp);
3831
3832 BGE_UNLOCK(sc);
3833 return (rx_npkts);
3834}
3835#endif /* DEVICE_POLLING */
3836
3837static int
3838bge_msi_intr(void *arg)
3839{
3840 struct bge_softc *sc;
3841
3842 sc = (struct bge_softc *)arg;
3843 /*
3844 * This interrupt is not shared and controller already
3845 * disabled further interrupt.
3846 */
3847 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3848 return (FILTER_HANDLED);
3849}
3850
3851static void
3852bge_intr_task(void *arg, int pending)
3853{
3854 struct bge_softc *sc;
3855 struct ifnet *ifp;
3856 uint32_t status, status_tag;
3857 uint16_t rx_prod, tx_cons;
3858
3859 sc = (struct bge_softc *)arg;
3860 ifp = sc->bge_ifp;
3861
3862 BGE_LOCK(sc);
3863 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3864 BGE_UNLOCK(sc);
3865 return;
3866 }
3867
3868 /* Get updated status block. */
3869 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3870 sc->bge_cdata.bge_status_map,
3871 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3872
3873 /* Save producer/consumer indexess. */
3874 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3875 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3876 status = sc->bge_ldata.bge_status_block->bge_status;
3877 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
3878 sc->bge_ldata.bge_status_block->bge_status = 0;
3879 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3880 sc->bge_cdata.bge_status_map,
3881 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3882 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
3883 status_tag = 0;
3884
3885 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
3886 bge_link_upd(sc);
3887
3888 /* Let controller work. */
3889 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
3890
3891 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3892 sc->bge_rx_saved_considx != rx_prod) {
3893 /* Check RX return ring producer/consumer. */
3894 BGE_UNLOCK(sc);
3895 bge_rxeof(sc, rx_prod, 0);
3896 BGE_LOCK(sc);
3897 }
3898 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3899 /* Check TX ring producer/consumer. */
3900 bge_txeof(sc, tx_cons);
3901 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3902 bge_start_locked(ifp);
3903 }
3904 BGE_UNLOCK(sc);
3905}
3906
3907static void
3908bge_intr(void *xsc)
3909{
3910 struct bge_softc *sc;
3911 struct ifnet *ifp;
3912 uint32_t statusword;
3913 uint16_t rx_prod, tx_cons;
3914
3915 sc = xsc;
3916
3917 BGE_LOCK(sc);
3918
3919 ifp = sc->bge_ifp;
3920
3921#ifdef DEVICE_POLLING
3922 if (ifp->if_capenable & IFCAP_POLLING) {
3923 BGE_UNLOCK(sc);
3924 return;
3925 }
3926#endif
3927
3928 /*
3929 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3930 * disable interrupts by writing nonzero like we used to, since with
3931 * our current organization this just gives complications and
3932 * pessimizations for re-enabling interrupts. We used to have races
3933 * instead of the necessary complications. Disabling interrupts
3934 * would just reduce the chance of a status update while we are
3935 * running (by switching to the interrupt-mode coalescence
3936 * parameters), but this chance is already very low so it is more
3937 * efficient to get another interrupt than prevent it.
3938 *
3939 * We do the ack first to ensure another interrupt if there is a
3940 * status update after the ack. We don't check for the status
3941 * changing later because it is more efficient to get another
3942 * interrupt than prevent it, not quite as above (not checking is
3943 * a smaller optimization than not toggling the interrupt enable,
3944	 * since checking doesn't involve PCI accesses and toggling requires
3945 * the status check). So toggling would probably be a pessimization
3946 * even with MSI. It would only be needed for using a task queue.
3947 */
3948 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3949
3950 /*
3951 * Do the mandatory PCI flush as well as get the link status.
3952 */
3953 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3954
3955 /* Make sure the descriptor ring indexes are coherent. */
3956 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3957 sc->bge_cdata.bge_status_map,
3958 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3959 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3960 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3961 sc->bge_ldata.bge_status_block->bge_status = 0;
3962 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3963 sc->bge_cdata.bge_status_map,
3964 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3965
3966 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3967 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3968 statusword || sc->bge_link_evt)
3969 bge_link_upd(sc);
3970
3971 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3972 /* Check RX return ring producer/consumer. */
3973 bge_rxeof(sc, rx_prod, 1);
3974 }
3975
3976 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3977 /* Check TX ring producer/consumer. */
3978 bge_txeof(sc, tx_cons);
3979 }
3980
3981 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3982 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3983 bge_start_locked(ifp);
3984
3985 BGE_UNLOCK(sc);
3986}
3987
3988static void
3989bge_asf_driver_up(struct bge_softc *sc)
3990{
3991 if (sc->bge_asf_mode & ASF_STACKUP) {
3992	/* Send ASF heartbeat approx. every 2s */
3993 if (sc->bge_asf_count)
3994 sc->bge_asf_count --;
3995 else {
3996 sc->bge_asf_count = 2;
3997 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3998 BGE_FW_DRV_ALIVE);
3999 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
4000 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
4001 CSR_WRITE_4(sc, BGE_CPU_EVENT,
4002 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
4003 }
4004 }
4005}
4006
4007static void
4008bge_tick(void *xsc)
4009{
4010 struct bge_softc *sc = xsc;
4011 struct mii_data *mii = NULL;
4012
4013 BGE_LOCK_ASSERT(sc);
4014
4015 /* Synchronize with possible callout reset/stop. */
4016 if (callout_pending(&sc->bge_stat_ch) ||
4017 !callout_active(&sc->bge_stat_ch))
4018 return;
4019
4020 if (BGE_IS_5705_PLUS(sc))
4021 bge_stats_update_regs(sc);
4022 else
4023 bge_stats_update(sc);
4024
4025 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4026 mii = device_get_softc(sc->bge_miibus);
4027 /*
4028	 * Do not touch the PHY if we have link up.  This could break
4029	 * IPMI/ASF mode or produce extra input errors
4030	 * (extra errors were reported for bcm5701 & bcm5704).
4031 */
4032 if (!sc->bge_link)
4033 mii_tick(mii);
4034 } else {
4035 /*
4036	 * Since auto-polling can't be used in TBI mode, we should poll
4037	 * the link status manually.  Here we register a pending link
4038	 * event and trigger an interrupt.
4039 */
4040#ifdef DEVICE_POLLING
4041 /* In polling mode we poll link state in bge_poll(). */
4042 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4043#endif
4044 {
4045 sc->bge_link_evt++;
4046 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4047 sc->bge_flags & BGE_FLAG_5788)
4048 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4049 else
4050 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4051 }
4052 }
4053
4054 bge_asf_driver_up(sc);
4055 bge_watchdog(sc);
4056
4057 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4058}
4059
4060static void
4061bge_stats_update_regs(struct bge_softc *sc)
4062{
4063 struct ifnet *ifp;
4064 struct bge_mac_stats *stats;
4065
4066 ifp = sc->bge_ifp;
4067 stats = &sc->bge_mac_stats;
4068
4069 stats->ifHCOutOctets +=
4070 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4071 stats->etherStatsCollisions +=
4072 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4073 stats->outXonSent +=
4074 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4075 stats->outXoffSent +=
4076 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4077 stats->dot3StatsInternalMacTransmitErrors +=
4078 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4079 stats->dot3StatsSingleCollisionFrames +=
4080 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4081 stats->dot3StatsMultipleCollisionFrames +=
4082 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4083 stats->dot3StatsDeferredTransmissions +=
4084 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4085 stats->dot3StatsExcessiveCollisions +=
4086 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4087 stats->dot3StatsLateCollisions +=
4088 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4089 stats->ifHCOutUcastPkts +=
4090 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4091 stats->ifHCOutMulticastPkts +=
4092 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4093 stats->ifHCOutBroadcastPkts +=
4094 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4095
4096 stats->ifHCInOctets +=
4097 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4098 stats->etherStatsFragments +=
4099 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4100 stats->ifHCInUcastPkts +=
4101 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4102 stats->ifHCInMulticastPkts +=
4103 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4104 stats->ifHCInBroadcastPkts +=
4105 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4106 stats->dot3StatsFCSErrors +=
4107 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4108 stats->dot3StatsAlignmentErrors +=
4109 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4110 stats->xonPauseFramesReceived +=
4111 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4112 stats->xoffPauseFramesReceived +=
4113 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4114 stats->macControlFramesReceived +=
4115 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4116 stats->xoffStateEntered +=
4117 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4118 stats->dot3StatsFramesTooLong +=
4119 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4120 stats->etherStatsJabbers +=
4121 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4122 stats->etherStatsUndersizePkts +=
4123 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4124
4125 stats->FramesDroppedDueToFilters +=
4126 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4127 stats->DmaWriteQueueFull +=
4128 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4129 stats->DmaWriteHighPriQueueFull +=
4130 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4131 stats->NoMoreRxBDs +=
4132 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4133 stats->InputDiscards +=
4134 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4135 stats->InputErrors +=
4136 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4137 stats->RecvThresholdHit +=
4138 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4139
4140 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4141 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4142 stats->InputErrors);
4143}
4144
4145static void
4146bge_stats_clear_regs(struct bge_softc *sc)
4147{
4148
4149 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4150 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4151 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4152 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4153 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4154 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4155 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4156 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4157 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4158 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4159 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4160 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4161 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4162
4163 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4164 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4165 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4166 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4167 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4168 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4169 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4170 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4171 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4172 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4173 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4174 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4175 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4176 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4177
4178 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4179 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4180 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4181 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4182 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4183 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4184 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4185}
4186
4187static void
4188bge_stats_update(struct bge_softc *sc)
4189{
4190 struct ifnet *ifp;
4191 bus_size_t stats;
4192 uint32_t cnt; /* current register value */
4193
4194 ifp = sc->bge_ifp;
4195
4196 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4197
4198#define READ_STAT(sc, stats, stat) \
4199 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4200
4201 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4202 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4203 sc->bge_tx_collisions = cnt;
4204
4205 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4206 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4207 sc->bge_rx_discards = cnt;
4208
4209 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4210 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4211 sc->bge_tx_discards = cnt;
4212
4213#undef READ_STAT
4214}
4215
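/*
 * Editor's note (illustrative only): READ_STAT maps a field of the host's
 * struct bge_stats onto the chip's statistics block through the memory
 * window.  For example,
 *
 *	READ_STAT(sc, stats, ifInDiscards.bge_addr_lo)
 *
 * expands to
 *
 *	CSR_READ_4(sc, stats +
 *	    offsetof(struct bge_stats, ifInDiscards.bge_addr_lo))
 *
 * i.e. a single 32-bit read at the byte offset of that field, with
 * stats == BGE_MEMWIN_START + BGE_STATS_BLOCK as set above.  The
 * (uint32_t) delta against the saved sc->bge_* counter tolerates 32-bit
 * wrap of the hardware counter.
 */
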
4216/*
4217 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4218 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4219 * but when such padded frames employ the bge IP/TCP checksum offload,
4220 * the hardware checksum assist gives incorrect results (possibly
4221 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4222 * If we pad such runts with zeros, the onboard checksum comes out correct.
4223 */
4224static __inline int
4225bge_cksum_pad(struct mbuf *m)
4226{
4227 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4228 struct mbuf *last;
4229
4230 /* If there's only the packet-header and we can pad there, use it. */
4231 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4232 M_TRAILINGSPACE(m) >= padlen) {
4233 last = m;
4234 } else {
4235 /*
4236 * Walk packet chain to find last mbuf. We will either
4237 * pad there, or append a new mbuf and pad it.
4238 */
4239 for (last = m; last->m_next != NULL; last = last->m_next);
4240 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4241 /* Allocate new empty mbuf, pad it. Compact later. */
4242 struct mbuf *n;
4243
4244 MGET(n, M_DONTWAIT, MT_DATA);
4245 if (n == NULL)
4246 return (ENOBUFS);
4247 n->m_len = 0;
4248 last->m_next = n;
4249 last = n;
4250 }
4251 }
4252
4253 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4254 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4255 last->m_len += padlen;
4256 m->m_pkthdr.len += padlen;
4257
4258 return (0);
4259}
4260
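/*
 * Editor's sketch (not part of the driver; assumes ETHER_MIN_NOPAD is the
 * usual 60-byte minimum frame length excluding the FCS): for a short UDP
 * frame with m->m_pkthdr.len == 46, bge_cksum_pad() computes
 * padlen == 60 - 46 == 14 and appends 14 zero bytes to the last writable
 * mbuf (allocating a fresh one if the tail has no room), so the hardware
 * checksum engine sums explicit zeros instead of whatever it would have
 * used for its own padding.
 */
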
4261static struct mbuf *
4262bge_check_short_dma(struct mbuf *m)
4263{
4264 struct mbuf *n;
4265 int found;
4266
4267 /*
4268	 * If the device receives two back-to-back send BDs with less than
4269	 * or equal to 8 total bytes then the device may hang.  The two
4270	 * back-to-back send BDs must be in the same frame for this failure
4271	 * to occur.  Scan the mbuf chain and see whether two back-to-back
4272	 * send BDs are there.  If this is the case, allocate a new mbuf
4273	 * and copy the frame to work around the silicon bug.
4274 */
4275 for (n = m, found = 0; n != NULL; n = n->m_next) {
4276 if (n->m_len < 8) {
4277 found++;
4278 if (found > 1)
4279 break;
4280 continue;
4281 }
4282 found = 0;
4283 }
4284
4285 if (found > 1) {
4286 n = m_defrag(m, M_DONTWAIT);
4287 if (n == NULL)
4288 m_freem(m);
4289 } else
4290 n = m;
4291 return (n);
4292}
4293
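/*
 * Editor's note (illustrative only): the scan above triggers on two
 * adjacent mbufs that are each shorter than 8 bytes.  For example, a
 * chain with m_len values { 6, 5, 1400 } is collapsed with m_defrag(),
 * while { 6, 1400, 5 } is passed through unchanged because the long mbuf
 * in the middle resets the counter.
 */
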
4294static struct mbuf *
4295bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4296 uint16_t *flags)
4297{
4298 struct ip *ip;
4299 struct tcphdr *tcp;
4300 struct mbuf *n;
4301 uint16_t hlen;
4302 uint32_t poff;
4303
4304 if (M_WRITABLE(m) == 0) {
4305 /* Get a writable copy. */
4306 n = m_dup(m, M_DONTWAIT);
4307 m_freem(m);
4308 if (n == NULL)
4309 return (NULL);
4310 m = n;
4311 }
4312 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4313 if (m == NULL)
4314 return (NULL);
4315 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4316 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4317 m = m_pullup(m, poff + sizeof(struct tcphdr));
4318 if (m == NULL)
4319 return (NULL);
4320 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4321 m = m_pullup(m, poff + (tcp->th_off << 2));
4322 if (m == NULL)
4323 return (NULL);
4324 /*
4325	 * It seems the controller doesn't modify the IP length or the TCP
4326	 * pseudo checksum.  These checksums, computed by the upper stack, should be 0.
4327 */
4328 *mss = m->m_pkthdr.tso_segsz;
4329 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4330 ip->ip_sum = 0;
4331 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4332 /* Clear pseudo checksum computed by TCP stack. */
4333 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4334 tcp->th_sum = 0;
4335 /*
4336	 * Broadcom controllers use a different descriptor format for
4337	 * TSO depending on the ASIC revision.  Due to TSO-capable firmware
4338	 * license issues and the lower performance of firmware-based TSO,
4339	 * we only support hardware-based TSO.
4340 */
4341 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4342 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4343 if (sc->bge_flags & BGE_FLAG_TSO3) {
4344 /*
4345 * For BCM5717 and newer controllers, hardware based TSO
4346 * uses the 14 lower bits of the bge_mss field to store the
4347 * MSS and the upper 2 bits to store the lowest 2 bits of
4348 * the IP/TCP header length. The upper 6 bits of the header
4349 * length are stored in the bge_flags[14:10,4] field. Jumbo
4350 * frames are supported.
4351 */
4352 *mss |= ((hlen & 0x3) << 14);
4353 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
4354 } else {
4355 /*
4356 * For BCM5755 and newer controllers, hardware based TSO uses
4357 * the lower 11 bits to store the MSS and the upper 5 bits to
4358 * store the IP/TCP header length. Jumbo frames are not
4359 * supported.
4360 */
4361 *mss |= (hlen << 11);
4362 }
4363 return (m);
4364}
4365
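/*
 * Editor's worked example (not part of the driver): with a 20-byte IP
 * header (ip_hl == 5), a 32-byte TCP header (th_off == 8) and an MSS of
 * 1448 (0x5a8), the code above computes hlen = (20 + 32) >> 2 = 13.
 * For BGE_FLAG_TSO3 devices:
 *
 *	*mss   |= (13 & 0x3) << 14;                        -> 0x4000 | 0x5a8 = 0x45a8
 *	*flags |= ((13 & 0xF8) << 7) | ((13 & 0x4) << 2);  -> 0x400 | 0x10
 *
 * For the pre-5717 encoding (the else branch above) the whole header
 * length goes into the upper bits of bge_mss instead:
 *
 *	*mss   |= 13 << 11;                                -> 0x6800 | 0x5a8 = 0x6da8
 */
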
4366/*
4367 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4368 * pointers to descriptors.
4369 */
4370static int
4371bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4372{
4373 bus_dma_segment_t segs[BGE_NSEG_NEW];
4374 bus_dmamap_t map;
4375 struct bge_tx_bd *d;
4376 struct mbuf *m = *m_head;
4377 uint32_t idx = *txidx;
4378 uint16_t csum_flags, mss, vlan_tag;
4379 int nsegs, i, error;
4380
4381 csum_flags = 0;
4382 mss = 0;
4383 vlan_tag = 0;
4384 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4385 m->m_next != NULL) {
4386 *m_head = bge_check_short_dma(m);
4387 if (*m_head == NULL)
4388 return (ENOBUFS);
4389 m = *m_head;
4390 }
4391 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4392 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4393 if (*m_head == NULL)
4394 return (ENOBUFS);
4395 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4396 BGE_TXBDFLAG_CPU_POST_DMA;
4397 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4398 if (m->m_pkthdr.csum_flags & CSUM_IP)
4399 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4400 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4401 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4402 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4403 (error = bge_cksum_pad(m)) != 0) {
4404 m_freem(m);
4405 *m_head = NULL;
4406 return (error);
4407 }
4408 }
4409 if (m->m_flags & M_LASTFRAG)
4410 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4411 else if (m->m_flags & M_FRAG)
4412 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4413 }
4414
4415 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4416 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4417 m->m_pkthdr.len > ETHER_MAX_LEN)
4418 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4419 if (sc->bge_forced_collapse > 0 &&
4420 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4421 /*
4422	 * Forcibly collapse mbuf chains to overcome a hardware
4423	 * limitation which only supports a single outstanding
4424	 * DMA read operation.
4425 */
4426 if (sc->bge_forced_collapse == 1)
4427 m = m_defrag(m, M_DONTWAIT);
4428 else
4429 m = m_collapse(m, M_DONTWAIT,
4430 sc->bge_forced_collapse);
4431 if (m == NULL)
4432 m = *m_head;
4433 *m_head = m;
4434 }
4435 }
4436
4437 map = sc->bge_cdata.bge_tx_dmamap[idx];
4438 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4439 &nsegs, BUS_DMA_NOWAIT);
4440 if (error == EFBIG) {
4441 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4442 if (m == NULL) {
4443 m_freem(*m_head);
4444 *m_head = NULL;
4445 return (ENOBUFS);
4446 }
4447 *m_head = m;
4448 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4449 m, segs, &nsegs, BUS_DMA_NOWAIT);
4450 if (error) {
4451 m_freem(m);
4452 *m_head = NULL;
4453 return (error);
4454 }
4455 } else if (error != 0)
4456 return (error);
4457
4458 /* Check if we have enough free send BDs. */
4459 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4460 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4461 return (ENOBUFS);
4462 }
4463
4464 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4465
4466 if (m->m_flags & M_VLANTAG) {
4467 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4468 vlan_tag = m->m_pkthdr.ether_vtag;
4469 }
4470 for (i = 0; ; i++) {
4471 d = &sc->bge_ldata.bge_tx_ring[idx];
4472 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4473 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4474 d->bge_len = segs[i].ds_len;
4475 d->bge_flags = csum_flags;
4476 d->bge_vlan_tag = vlan_tag;
4477 d->bge_mss = mss;
4478 if (i == nsegs - 1)
4479 break;
4480 BGE_INC(idx, BGE_TX_RING_CNT);
4481 }
4482
4483 /* Mark the last segment as end of packet... */
4484 d->bge_flags |= BGE_TXBDFLAG_END;
4485
4486 /*
4487	 * Ensure that the map for this transmission
4488 * is placed at the array index of the last descriptor
4489 * in this chain.
4490 */
4491 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4492 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4493 sc->bge_cdata.bge_tx_chain[idx] = m;
4494 sc->bge_txcnt += nsegs;
4495
4496 BGE_INC(idx, BGE_TX_RING_CNT);
4497 *txidx = idx;
4498
4499 return (0);
4500}
4501
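/*
 * Editor's note (illustrative only): for a frame that maps to three DMA
 * segments starting at ring slot 10, bge_encap() fills descriptors 10, 11
 * and 12 with the same csum/vlan/mss fields and sets BGE_TXBDFLAG_END only
 * on slot 12.  The mbuf is stored at slot 12, the loaded DMA map
 * (originally slot 10's) is swapped into slot 12, and slot 12's idle map
 * is parked at slot 10, so bge_txeof() can unload the right map when it
 * consumes the END descriptor.  *txidx is advanced to slot 13, the next
 * free BD.
 */
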
4502/*
4503 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4504 * to the mbuf data regions directly in the transmit descriptors.
4505 */
4506static void
4507bge_start_locked(struct ifnet *ifp)
4508{
4509 struct bge_softc *sc;
4510 struct mbuf *m_head;
4511 uint32_t prodidx;
4512 int count;
4513
4514 sc = ifp->if_softc;
4515 BGE_LOCK_ASSERT(sc);
4516
4517 if (!sc->bge_link ||
4518 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4519 IFF_DRV_RUNNING)
4520 return;
4521
4522 prodidx = sc->bge_tx_prodidx;
4523
4524 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4525 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4526 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4527 break;
4528 }
4529 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4530 if (m_head == NULL)
4531 break;
4532
4533 /*
4534 * XXX
4535 * The code inside the if() block is never reached since we
4536 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4537 * requests to checksum TCP/UDP in a fragmented packet.
4538 *
4539 * XXX
4540 * safety overkill. If this is a fragmented packet chain
4541 * with delayed TCP/UDP checksums, then only encapsulate
4542 * it if we have enough descriptors to handle the entire
4543 * chain at once.
4544 * (paranoia -- may not actually be needed)
4545 */
4546 if (m_head->m_flags & M_FIRSTFRAG &&
4547 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4548 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4549 m_head->m_pkthdr.csum_data + 16) {
4550 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4551 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4552 break;
4553 }
4554 }
4555
4556 /*
4557 * Pack the data into the transmit ring. If we
4558 * don't have room, set the OACTIVE flag and wait
4559 * for the NIC to drain the ring.
4560 */
4561 if (bge_encap(sc, &m_head, &prodidx)) {
4562 if (m_head == NULL)
4563 break;
4564 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4565 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4566 break;
4567 }
4568 ++count;
4569
4570 /*
4571 * If there's a BPF listener, bounce a copy of this frame
4572 * to him.
4573 */
4574#ifdef ETHER_BPF_MTAP
4575 ETHER_BPF_MTAP(ifp, m_head);
4576#else
4577 BPF_MTAP(ifp, m_head);
4578#endif
4579 }
4580
4581 if (count > 0) {
4582 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4583 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4584 /* Transmit. */
4585 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4586 /* 5700 b2 errata */
4587 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4588 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4589
4590 sc->bge_tx_prodidx = prodidx;
4591
4592 /*
4593 * Set a timeout in case the chip goes out to lunch.
4594 */
4595 sc->bge_timer = 5;
4596 }
4597}
4598
4599/*
4600 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4601 * to the mbuf data regions directly in the transmit descriptors.
4602 */
4603static void
4604bge_start(struct ifnet *ifp)
4605{
4606 struct bge_softc *sc;
4607
4608 sc = ifp->if_softc;
4609 BGE_LOCK(sc);
4610 bge_start_locked(ifp);
4611 BGE_UNLOCK(sc);
4612}
4613
4614static void
4615bge_init_locked(struct bge_softc *sc)
4616{
4617 struct ifnet *ifp;
4618 uint16_t *m;
4619 uint32_t mode;
4620
4621 BGE_LOCK_ASSERT(sc);
4622
4623 ifp = sc->bge_ifp;
4624
4625 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4626 return;
4627
4628 /* Cancel pending I/O and flush buffers. */
4629 bge_stop(sc);
4630
4631 bge_stop_fw(sc);
4632 bge_sig_pre_reset(sc, BGE_RESET_START);
4633 bge_reset(sc);
4634 bge_sig_legacy(sc, BGE_RESET_START);
4635 bge_sig_post_reset(sc, BGE_RESET_START);
4636
4637 bge_chipinit(sc);
4638
4639 /*
4640 * Init the various state machines, ring
4641 * control blocks and firmware.
4642 */
4643 if (bge_blockinit(sc)) {
4644 device_printf(sc->bge_dev, "initialization failure\n");
4645 return;
4646 }
4647
4648 ifp = sc->bge_ifp;
4649
4650 /* Specify MTU. */
4651 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4652 ETHER_HDR_LEN + ETHER_CRC_LEN +
4653 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
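	/*
	 * Editor's note: with the default 1500-byte MTU and IFCAP_VLAN_MTU
	 * enabled, the register above is programmed to
	 * 1500 + 14 (Ethernet header) + 4 (FCS) + 4 (VLAN tag) = 1522 bytes.
	 */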
4654
4655 /* Load our MAC address. */
4656 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4657 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4658 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4659
4660 /* Program promiscuous mode. */
4661 bge_setpromisc(sc);
4662
4663 /* Program multicast filter. */
4664 bge_setmulti(sc);
4665
4666 /* Program VLAN tag stripping. */
4667 bge_setvlan(sc);
4668
4669 /* Override UDP checksum offloading. */
4670 if (sc->bge_forced_udpcsum == 0)
4671 sc->bge_csum_features &= ~CSUM_UDP;
4672 else
4673 sc->bge_csum_features |= CSUM_UDP;
4674 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4675 ifp->if_capenable & IFCAP_TXCSUM) {
4676 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4677 ifp->if_hwassist |= sc->bge_csum_features;
4678 }
4679
4680 /* Init RX ring. */
4681 if (bge_init_rx_ring_std(sc) != 0) {
4682 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4683 bge_stop(sc);
4684 return;
4685 }
4686
4687 /*
4688 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4689	 * memory to ensure that the chip has in fact read the first
4690 * entry of the ring.
4691 */
4692 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4693 uint32_t v, i;
4694 for (i = 0; i < 10; i++) {
4695 DELAY(20);
4696 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4697 if (v == (MCLBYTES - ETHER_ALIGN))
4698 break;
4699 }
4700 if (i == 10)
4701 device_printf (sc->bge_dev,
4702 "5705 A0 chip failed to load RX ring\n");
4703 }
4704
4705 /* Init jumbo RX ring. */
4692 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4706 if (BGE_IS_JUMBO_CAPABLE(sc) &&
4707 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4708 (MCLBYTES - ETHER_ALIGN)) {
4709 if (bge_init_rx_ring_jumbo(sc) != 0) {
4710 device_printf(sc->bge_dev,
4711 "no memory for jumbo Rx buffers.\n");
4712 bge_stop(sc);
4713 return;
4714 }
4715 }
4716
4717 /* Init our RX return ring index. */
4718 sc->bge_rx_saved_considx = 0;
4719
4720 /* Init our RX/TX stat counters. */
4721 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4722
4723 /* Init TX ring. */
4724 bge_init_tx_ring(sc);
4725
4726 /* Enable TX MAC state machine lockup fix. */
4727 mode = CSR_READ_4(sc, BGE_TX_MODE);
4728 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4729 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4730 /* Turn on transmitter. */
4731 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4732
4733 /* Turn on receiver. */
4734 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4735
4736 /*
4737 * Set the number of good frames to receive after RX MBUF
4738 * Low Watermark has been reached. After the RX MAC receives
4739 * this number of frames, it will drop subsequent incoming
4740 * frames until the MBUF High Watermark is reached.
4741 */
4742 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4743
4744 /* Clear MAC statistics. */
4745 if (BGE_IS_5705_PLUS(sc))
4746 bge_stats_clear_regs(sc);
4747
4748 /* Tell firmware we're alive. */
4749 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4750
4751#ifdef DEVICE_POLLING
4752 /* Disable interrupts if we are polling. */
4753 if (ifp->if_capenable & IFCAP_POLLING) {
4754 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4755 BGE_PCIMISCCTL_MASK_PCI_INTR);
4756 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4757 } else
4758#endif
4759
4760 /* Enable host interrupts. */
4761 {
4762 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4763 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4764 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4765 }
4766
4767 bge_ifmedia_upd_locked(ifp);
4768
4769 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4770 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4771
4772 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4773}
4774
4775static void
4776bge_init(void *xsc)
4777{
4778 struct bge_softc *sc = xsc;
4779
4780 BGE_LOCK(sc);
4781 bge_init_locked(sc);
4782 BGE_UNLOCK(sc);
4783}
4784
4785/*
4786 * Set media options.
4787 */
4788static int
4789bge_ifmedia_upd(struct ifnet *ifp)
4790{
4791 struct bge_softc *sc = ifp->if_softc;
4792 int res;
4793
4794 BGE_LOCK(sc);
4795 res = bge_ifmedia_upd_locked(ifp);
4796 BGE_UNLOCK(sc);
4797
4798 return (res);
4799}
4800
4801static int
4802bge_ifmedia_upd_locked(struct ifnet *ifp)
4803{
4804 struct bge_softc *sc = ifp->if_softc;
4805 struct mii_data *mii;
4806 struct mii_softc *miisc;
4807 struct ifmedia *ifm;
4808
4809 BGE_LOCK_ASSERT(sc);
4810
4811 ifm = &sc->bge_ifmedia;
4812
4813 /* If this is a 1000baseX NIC, enable the TBI port. */
4814 if (sc->bge_flags & BGE_FLAG_TBI) {
4815 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4816 return (EINVAL);
4817 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4818 case IFM_AUTO:
4819 /*
4820 * The BCM5704 ASIC appears to have a special
4821 * mechanism for programming the autoneg
4822 * advertisement registers in TBI mode.
4823 */
4824 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4825 uint32_t sgdig;
4826 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4827 if (sgdig & BGE_SGDIGSTS_DONE) {
4828 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4829 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4830 sgdig |= BGE_SGDIGCFG_AUTO |
4831 BGE_SGDIGCFG_PAUSE_CAP |
4832 BGE_SGDIGCFG_ASYM_PAUSE;
4833 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4834 sgdig | BGE_SGDIGCFG_SEND);
4835 DELAY(5);
4836 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4837 }
4838 }
4839 break;
4840 case IFM_1000_SX:
4841 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4842 BGE_CLRBIT(sc, BGE_MAC_MODE,
4843 BGE_MACMODE_HALF_DUPLEX);
4844 } else {
4845 BGE_SETBIT(sc, BGE_MAC_MODE,
4846 BGE_MACMODE_HALF_DUPLEX);
4847 }
4848 break;
4849 default:
4850 return (EINVAL);
4851 }
4852 return (0);
4853 }
4854
4855 sc->bge_link_evt++;
4856 mii = device_get_softc(sc->bge_miibus);
4857 if (mii->mii_instance)
4858 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4859 mii_phy_reset(miisc);
4860 mii_mediachg(mii);
4861
4862 /*
4863 * Force an interrupt so that we will call bge_link_upd
4864 * if needed and clear any pending link state attention.
4865	 * Without this we do not get any further interrupts for link
4866	 * state changes and thus will not bring the link UP and will
4867	 * not be able to send in bge_start_locked.  The only way to
4868	 * get things working was to receive a packet and get an RX
4869	 * intr.
4870	 * bge_tick should help for fiber cards and we might not need
4871	 * to do this here if BGE_FLAG_TBI is set, but as we poll for
4872	 * fiber anyway it should not harm.
4873 */
4874 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4875 sc->bge_flags & BGE_FLAG_5788)
4876 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4877 else
4878 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4879
4880 return (0);
4881}
4882
4883/*
4884 * Report current media status.
4885 */
4886static void
4887bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4888{
4889 struct bge_softc *sc = ifp->if_softc;
4890 struct mii_data *mii;
4891
4892 BGE_LOCK(sc);
4893
4894 if (sc->bge_flags & BGE_FLAG_TBI) {
4895 ifmr->ifm_status = IFM_AVALID;
4896 ifmr->ifm_active = IFM_ETHER;
4897 if (CSR_READ_4(sc, BGE_MAC_STS) &
4898 BGE_MACSTAT_TBI_PCS_SYNCHED)
4899 ifmr->ifm_status |= IFM_ACTIVE;
4900 else {
4901 ifmr->ifm_active |= IFM_NONE;
4902 BGE_UNLOCK(sc);
4903 return;
4904 }
4905 ifmr->ifm_active |= IFM_1000_SX;
4906 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4907 ifmr->ifm_active |= IFM_HDX;
4908 else
4909 ifmr->ifm_active |= IFM_FDX;
4910 BGE_UNLOCK(sc);
4911 return;
4912 }
4913
4914 mii = device_get_softc(sc->bge_miibus);
4915 mii_pollstat(mii);
4916 ifmr->ifm_active = mii->mii_media_active;
4917 ifmr->ifm_status = mii->mii_media_status;
4918
4919 BGE_UNLOCK(sc);
4920}
4921
4922static int
4923bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4924{
4925 struct bge_softc *sc = ifp->if_softc;
4926 struct ifreq *ifr = (struct ifreq *) data;
4927 struct mii_data *mii;
4928 int flags, mask, error = 0;
4929
4930 switch (command) {
4931 case SIOCSIFMTU:
4932 if (BGE_IS_JUMBO_CAPABLE(sc) ||
4933 (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
4934 if (ifr->ifr_mtu < ETHERMIN ||
4935 ifr->ifr_mtu > BGE_JUMBO_MTU) {
4936 error = EINVAL;
4937 break;
4938 }
4939 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
4940 error = EINVAL;
4941 break;
4942 }
4917 BGE_LOCK(sc);
4943 BGE_LOCK(sc);
4918 if (ifr->ifr_mtu < ETHERMIN ||
4919 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4920 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4921 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4922 ifr->ifr_mtu > ETHERMTU))
4923 error = EINVAL;
4924 else if (ifp->if_mtu != ifr->ifr_mtu) {
4944 if (ifp->if_mtu != ifr->ifr_mtu) {
4925 ifp->if_mtu = ifr->ifr_mtu;
4926 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4927 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4928 bge_init_locked(sc);
4929 }
4930 }
4931 BGE_UNLOCK(sc);
4932 break;
4933 case SIOCSIFFLAGS:
4934 BGE_LOCK(sc);
4935 if (ifp->if_flags & IFF_UP) {
4936 /*
4937 * If only the state of the PROMISC flag changed,
4938 * then just use the 'set promisc mode' command
4939 * instead of reinitializing the entire NIC. Doing
4940 * a full re-init means reloading the firmware and
4941 * waiting for it to start up, which may take a
4942 * second or two. Similarly for ALLMULTI.
4943 */
4944 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4945 flags = ifp->if_flags ^ sc->bge_if_flags;
4946 if (flags & IFF_PROMISC)
4947 bge_setpromisc(sc);
4948 if (flags & IFF_ALLMULTI)
4949 bge_setmulti(sc);
4950 } else
4951 bge_init_locked(sc);
4952 } else {
4953 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4954 bge_stop(sc);
4955 }
4956 }
4957 sc->bge_if_flags = ifp->if_flags;
4958 BGE_UNLOCK(sc);
4959 error = 0;
4960 break;
4961 case SIOCADDMULTI:
4962 case SIOCDELMULTI:
4963 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4964 BGE_LOCK(sc);
4965 bge_setmulti(sc);
4966 BGE_UNLOCK(sc);
4967 error = 0;
4968 }
4969 break;
4970 case SIOCSIFMEDIA:
4971 case SIOCGIFMEDIA:
4972 if (sc->bge_flags & BGE_FLAG_TBI) {
4973 error = ifmedia_ioctl(ifp, ifr,
4974 &sc->bge_ifmedia, command);
4975 } else {
4976 mii = device_get_softc(sc->bge_miibus);
4977 error = ifmedia_ioctl(ifp, ifr,
4978 &mii->mii_media, command);
4979 }
4980 break;
4981 case SIOCSIFCAP:
4982 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4983#ifdef DEVICE_POLLING
4984 if (mask & IFCAP_POLLING) {
4985 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4986 error = ether_poll_register(bge_poll, ifp);
4987 if (error)
4988 return (error);
4989 BGE_LOCK(sc);
4990 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4991 BGE_PCIMISCCTL_MASK_PCI_INTR);
4992 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4993 ifp->if_capenable |= IFCAP_POLLING;
4994 BGE_UNLOCK(sc);
4995 } else {
4996 error = ether_poll_deregister(ifp);
4997 /* Enable interrupt even in error case */
4998 BGE_LOCK(sc);
4999 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5000 BGE_PCIMISCCTL_MASK_PCI_INTR);
5001 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5002 ifp->if_capenable &= ~IFCAP_POLLING;
5003 BGE_UNLOCK(sc);
5004 }
5005 }
5006#endif
5007 if ((mask & IFCAP_TXCSUM) != 0 &&
5008 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5009 ifp->if_capenable ^= IFCAP_TXCSUM;
5010 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5011 ifp->if_hwassist |= sc->bge_csum_features;
5012 else
5013 ifp->if_hwassist &= ~sc->bge_csum_features;
5014 }
5015
5016 if ((mask & IFCAP_RXCSUM) != 0 &&
5017 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5018 ifp->if_capenable ^= IFCAP_RXCSUM;
5019
5020 if ((mask & IFCAP_TSO4) != 0 &&
5021 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5022 ifp->if_capenable ^= IFCAP_TSO4;
5023 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5024 ifp->if_hwassist |= CSUM_TSO;
5025 else
5026 ifp->if_hwassist &= ~CSUM_TSO;
5027 }
5028
5029 if (mask & IFCAP_VLAN_MTU) {
5030 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5031 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5032 bge_init(sc);
5033 }
5034
5035 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5036 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5037 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5038 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5039 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5040 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5041 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5042 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5043 BGE_LOCK(sc);
5044 bge_setvlan(sc);
5045 BGE_UNLOCK(sc);
5046 }
5047#ifdef VLAN_CAPABILITIES
5048 VLAN_CAPABILITIES(ifp);
5049#endif
5050 break;
5051 default:
5052 error = ether_ioctl(ifp, command, data);
5053 break;
5054 }
5055
5056 return (error);
5057}
5058
5059static void
5060bge_watchdog(struct bge_softc *sc)
5061{
5062 struct ifnet *ifp;
5063
5064 BGE_LOCK_ASSERT(sc);
5065
5066 if (sc->bge_timer == 0 || --sc->bge_timer)
5067 return;
5068
5069 ifp = sc->bge_ifp;
5070
5071 if_printf(ifp, "watchdog timeout -- resetting\n");
5072
5073 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5074 bge_init_locked(sc);
5075
5076 ifp->if_oerrors++;
5077}
5078
5079/*
5080 * Stop the adapter and free any mbufs allocated to the
5081 * RX and TX lists.
5082 */
5083static void
5084bge_stop(struct bge_softc *sc)
5085{
5086 struct ifnet *ifp;
5087
5088 BGE_LOCK_ASSERT(sc);
5089
5090 ifp = sc->bge_ifp;
5091
5092 callout_stop(&sc->bge_stat_ch);
5093
5094 /* Disable host interrupts. */
5095 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5096 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5097
5098 /*
5099 * Tell firmware we're shutting down.
5100 */
5101 bge_stop_fw(sc);
5102 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5103
5104 /*
5105 * Disable all of the receiver blocks.
5106 */
5107 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5108 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5109 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5110 if (!(BGE_IS_5705_PLUS(sc)))
5111 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5112 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5113 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5114 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5115
5116 /*
5117 * Disable all of the transmit blocks.
5118 */
5119 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5120 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5121 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5122 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5123 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5124 if (!(BGE_IS_5705_PLUS(sc)))
5125 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5126 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5127
5128 /*
5129 * Shut down all of the memory managers and related
5130 * state machines.
5131 */
5132 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5133 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5134 if (!(BGE_IS_5705_PLUS(sc)))
5135 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5136 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5137 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5138 if (!(BGE_IS_5705_PLUS(sc))) {
5139 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5140 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5141 }
5142 /* Update MAC statistics. */
5143 if (BGE_IS_5705_PLUS(sc))
5144 bge_stats_update_regs(sc);
5145
5146 bge_reset(sc);
5147 bge_sig_legacy(sc, BGE_RESET_STOP);
5148 bge_sig_post_reset(sc, BGE_RESET_STOP);
5149
5150 /*
5151 * Keep the ASF firmware running if up.
5152 */
5153 if (sc->bge_asf_mode & ASF_STACKUP)
5154 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5155 else
5156 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5157
5158 /* Free the RX lists. */
5159 bge_free_rx_ring_std(sc);
5160
5161 /* Free jumbo RX list. */
5162 if (BGE_IS_JUMBO_CAPABLE(sc))
5163 bge_free_rx_ring_jumbo(sc);
5164
5165 /* Free TX buffers. */
5166 bge_free_tx_ring(sc);
5167
5168 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5169
5170 /* Clear MAC's link state (PHY may still have link UP). */
5171 if (bootverbose && sc->bge_link)
5172 if_printf(sc->bge_ifp, "link DOWN\n");
5173 sc->bge_link = 0;
5174
5175 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5176}
5177
5178/*
5179 * Stop all chip I/O so that the kernel's probe routines don't
5180 * get confused by errant DMAs when rebooting.
5181 */
5182static int
5183bge_shutdown(device_t dev)
5184{
5185 struct bge_softc *sc;
5186
5187 sc = device_get_softc(dev);
5188 BGE_LOCK(sc);
5189 bge_stop(sc);
5190 bge_reset(sc);
5191 BGE_UNLOCK(sc);
5192
5193 return (0);
5194}
5195
5196static int
5197bge_suspend(device_t dev)
5198{
5199 struct bge_softc *sc;
5200
5201 sc = device_get_softc(dev);
5202 BGE_LOCK(sc);
5203 bge_stop(sc);
5204 BGE_UNLOCK(sc);
5205
5206 return (0);
5207}
5208
5209static int
5210bge_resume(device_t dev)
5211{
5212 struct bge_softc *sc;
5213 struct ifnet *ifp;
5214
5215 sc = device_get_softc(dev);
5216 BGE_LOCK(sc);
5217 ifp = sc->bge_ifp;
5218 if (ifp->if_flags & IFF_UP) {
5219 bge_init_locked(sc);
5220 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5221 bge_start_locked(ifp);
5222 }
5223 BGE_UNLOCK(sc);
5224
5225 return (0);
5226}
5227
5228static void
5229bge_link_upd(struct bge_softc *sc)
5230{
5231 struct mii_data *mii;
5232 uint32_t link, status;
5233
5234 BGE_LOCK_ASSERT(sc);
5235
5236 /* Clear 'pending link event' flag. */
5237 sc->bge_link_evt = 0;
5238
5239 /*
5240 * Process link state changes.
5241 * Grrr. The link status word in the status block does
5242 * not work correctly on the BCM5700 rev AX and BX chips,
5243 * according to all available information. Hence, we have
5244 * to enable MII interrupts in order to properly obtain
5245 * async link changes. Unfortunately, this also means that
5246 * we have to read the MAC status register to detect link
5247 * changes, thereby adding an additional register access to
5248 * the interrupt handler.
5249 *
5250	 * XXX: perhaps the link state detection procedure used for
5251	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5252 */
5253
5254 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5255 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5256 status = CSR_READ_4(sc, BGE_MAC_STS);
5257 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5258 mii = device_get_softc(sc->bge_miibus);
5259 mii_pollstat(mii);
5260 if (!sc->bge_link &&
5261 mii->mii_media_status & IFM_ACTIVE &&
5262 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5263 sc->bge_link++;
5264 if (bootverbose)
5265 if_printf(sc->bge_ifp, "link UP\n");
5266 } else if (sc->bge_link &&
5267 (!(mii->mii_media_status & IFM_ACTIVE) ||
5268 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5269 sc->bge_link = 0;
5270 if (bootverbose)
5271 if_printf(sc->bge_ifp, "link DOWN\n");
5272 }
5273
5274 /* Clear the interrupt. */
5275 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5276 BGE_EVTENB_MI_INTERRUPT);
5277 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5278 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5279 BRGPHY_INTRS);
5280 }
5281 return;
5282 }
5283
5284 if (sc->bge_flags & BGE_FLAG_TBI) {
5285 status = CSR_READ_4(sc, BGE_MAC_STS);
5286 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5287 if (!sc->bge_link) {
5288 sc->bge_link++;
5289 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5290 BGE_CLRBIT(sc, BGE_MAC_MODE,
5291 BGE_MACMODE_TBI_SEND_CFGS);
5292 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5293 if (bootverbose)
5294 if_printf(sc->bge_ifp, "link UP\n");
5295 if_link_state_change(sc->bge_ifp,
5296 LINK_STATE_UP);
5297 }
5298 } else if (sc->bge_link) {
5299 sc->bge_link = 0;
5300 if (bootverbose)
5301 if_printf(sc->bge_ifp, "link DOWN\n");
5302 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5303 }
5304 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5305 /*
5306	 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
5307	 * bit in the status word always set.  Work around this bug by
5308	 * reading the PHY link status directly.
5309 */
5310 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5311
5312 if (link != sc->bge_link ||
5313 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5314 mii = device_get_softc(sc->bge_miibus);
5315 mii_pollstat(mii);
5316 if (!sc->bge_link &&
5317 mii->mii_media_status & IFM_ACTIVE &&
5318 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5319 sc->bge_link++;
5320 if (bootverbose)
5321 if_printf(sc->bge_ifp, "link UP\n");
5322 } else if (sc->bge_link &&
5323 (!(mii->mii_media_status & IFM_ACTIVE) ||
5324 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5325 sc->bge_link = 0;
5326 if (bootverbose)
5327 if_printf(sc->bge_ifp, "link DOWN\n");
5328 }
5329 }
5330 } else {
5331 /*
5332 * For controllers that call mii_tick, we have to poll
5333 * link status.
5334 */
5335 mii = device_get_softc(sc->bge_miibus);
5336 mii_pollstat(mii);
5337 bge_miibus_statchg(sc->bge_dev);
5338 }
5339
5340 /* Clear the attention. */
5341 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5342 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5343 BGE_MACSTAT_LINK_CHANGED);
5344}
5345
5346static void
5347bge_add_sysctls(struct bge_softc *sc)
5348{
5349 struct sysctl_ctx_list *ctx;
5350 struct sysctl_oid_list *children;
5351 char tn[32];
5352 int unit;
5353
5354 ctx = device_get_sysctl_ctx(sc->bge_dev);
5355 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5356
5357#ifdef BGE_REGISTER_DEBUG
5358 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5359 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5360 "Debug Information");
5361
5362 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5363 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5364 "Register Read");
5365
5366 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5367 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5368 "Memory Read");
5369
5370#endif
5371
5372 unit = device_get_unit(sc->bge_dev);
5373 /*
5374 * A common design characteristic for many Broadcom client controllers
5375 * is that they only support a single outstanding DMA read operation
5376 * on the PCIe bus. This means that it will take twice as long to fetch
5377 * a TX frame that is split into header and payload buffers as it does
5378 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5379 * these controllers, coalescing buffers to reduce the number of memory
5380	 * reads is an effective way to get maximum performance (about 940 Mbps).
5381	 * Without collapsing TX buffers the maximum TCP bulk transfer
5382	 * performance is about 850 Mbps.  However, forcing mbufs to be
5383	 * coalesced consumes a lot of CPU cycles, so leave it off by default.
5384 */
5385 sc->bge_forced_collapse = 0;
5386 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5387 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5388 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5389 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5390 "Number of fragmented TX buffers of a frame allowed before "
5391 "forced collapsing");
5392
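	/*
	 * Editor's usage note (illustrative): given the names registered
	 * above, the knob can be preset from the loader as
	 * dev.bge.0.forced_collapse="2" or changed at runtime with
	 * sysctl dev.bge.0.forced_collapse=2.  A value of 1 makes
	 * bge_encap() call m_defrag(); larger values use m_collapse()
	 * with that value as the fragment limit.
	 */
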
5393 /*
5394 * It seems all Broadcom controllers have a bug that can generate UDP
5395 * datagrams with checksum value 0 when TX UDP checksum offloading is
5396	 * enabled.  Generating a UDP checksum value of 0 violates RFC 768.
5397	 * Even though the probability of generating such UDP datagrams is
5398	 * low, I don't want to see FreeBSD boxes inject such datagrams
5399	 * into the network, so disable UDP checksum offloading by default.
5400	 * Users can still override this behavior by setting a sysctl variable,
5401 * dev.bge.0.forced_udpcsum.
5402 */
5403 sc->bge_forced_udpcsum = 0;
5404 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5405 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5406 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5407 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5408 "Enable UDP checksum offloading even if controller can "
5409 "generate UDP checksum value 0");
5410
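	/*
	 * Editor's usage note (illustrative): note the asymmetry above --
	 * the boot-time tunable fetched is dev.bge.%d.bge_forced_udpcsum,
	 * while the runtime sysctl created is dev.bge.%d.forced_udpcsum.
	 * Setting either (the tunable at boot, the sysctl afterwards) to a
	 * nonzero value re-enables TX UDP checksum offloading the next time
	 * bge_init_locked() runs.
	 */
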
5411 if (BGE_IS_5705_PLUS(sc))
5412 bge_add_sysctl_stats_regs(sc, ctx, children);
5413 else
5414 bge_add_sysctl_stats(sc, ctx, children);
5415}
5416
5417#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5418 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5419 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5420 desc)
5421
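/*
 * Editor's note (illustrative only): each BGE_SYSCTL_STAT() use below
 * expands to a read-only sysctl proc node whose arg2 is the byte offset of
 * the statistic inside struct bge_stats, e.g.
 *
 *	BGE_SYSCTL_STAT(sc, ctx, "Input Errors", children, ifInErrors,
 *	    "InputErrors")
 *
 * registers an "InputErrors" leaf backed by bge_sysctl_stats() with
 * offsetof(struct bge_stats, ifInErrors) as its argument, so one handler
 * can serve every counter in the block.
 */
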
5422static void
5423bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5424 struct sysctl_oid_list *parent)
5425{
5426 struct sysctl_oid *tree;
5427 struct sysctl_oid_list *children, *schildren;
5428
5429 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5430 NULL, "BGE Statistics");
5431 schildren = children = SYSCTL_CHILDREN(tree);
5432 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5433 children, COSFramesDroppedDueToFilters,
5434 "FramesDroppedDueToFilters");
5435 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5436 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5437 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5438 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5439 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5440 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5441 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5442 children, ifInDiscards, "InputDiscards");
5443 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5444 children, ifInErrors, "InputErrors");
5445 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5446 children, nicRecvThresholdHit, "RecvThresholdHit");
5447 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5448 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5449 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5450 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5451 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5452 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5453 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5454 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5455 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5456 children, nicRingStatusUpdate, "RingStatusUpdate");
5457 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5458 children, nicInterrupts, "Interrupts");
5459 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5460 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5461 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5462 children, nicSendThresholdHit, "SendThresholdHit");
5463
5464 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5465 NULL, "BGE RX Statistics");
5466 children = SYSCTL_CHILDREN(tree);
5467 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5468 children, rxstats.ifHCInOctets, "ifHCInOctets");
5469 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5470 children, rxstats.etherStatsFragments, "Fragments");
5471 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5472 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5473 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5474 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5475 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5476 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5477 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5478 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5479 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5480 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5481 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5482 children, rxstats.xoffPauseFramesReceived,
5483 "xoffPauseFramesReceived");
5484 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5485 children, rxstats.macControlFramesReceived,
5486 "ControlFramesReceived");
5487 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5488 children, rxstats.xoffStateEntered, "xoffStateEntered");
5489 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5490 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5491 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5492 children, rxstats.etherStatsJabbers, "Jabbers");
5493 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5494 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5495 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5496 children, rxstats.inRangeLengthError, "inRangeLengthError");
5497 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5498 children, rxstats.outRangeLengthError, "outRangeLengthError");
5499
5500 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5501 NULL, "BGE TX Statistics");
5502 children = SYSCTL_CHILDREN(tree);
5503 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5504 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5505 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5506 children, txstats.etherStatsCollisions, "Collisions");
5507 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5508 children, txstats.outXonSent, "XonSent");
5509 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5510 children, txstats.outXoffSent, "XoffSent");
5511 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5512 children, txstats.flowControlDone, "flowControlDone");
5513 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5514 children, txstats.dot3StatsInternalMacTransmitErrors,
5515 "InternalMacTransmitErrors");
5516 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5517 children, txstats.dot3StatsSingleCollisionFrames,
5518 "SingleCollisionFrames");
5519 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5520 children, txstats.dot3StatsMultipleCollisionFrames,
5521 "MultipleCollisionFrames");
5522 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5523 children, txstats.dot3StatsDeferredTransmissions,
5524 "DeferredTransmissions");
5525 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5526 children, txstats.dot3StatsExcessiveCollisions,
5527 "ExcessiveCollisions");
5528 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5529 children, txstats.dot3StatsLateCollisions,
5530 "LateCollisions");
5531 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5532 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5533 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5534 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5535 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5536 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5537 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5538 children, txstats.dot3StatsCarrierSenseErrors,
5539 "CarrierSenseErrors");
5540 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5541 children, txstats.ifOutDiscards, "Discards");
5542 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5543 children, txstats.ifOutErrors, "Errors");
5544}
5545
5546#undef BGE_SYSCTL_STAT
5547
5548#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5549 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5550
5551static void
5552bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5553 struct sysctl_oid_list *parent)
5554{
5555 struct sysctl_oid *tree;
5556 struct sysctl_oid_list *child, *schild;
5557 struct bge_mac_stats *stats;
5558
5559 stats = &sc->bge_mac_stats;
5560 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5561 NULL, "BGE Statistics");
5562 schild = child = SYSCTL_CHILDREN(tree);
5563 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5564 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5565 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5566 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5567 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5568 &stats->DmaWriteHighPriQueueFull,
5569 "NIC DMA Write High Priority Queue Full");
5570 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5571 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5572 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5573 &stats->InputDiscards, "Discarded Input Frames");
5574 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5575 &stats->InputErrors, "Input Errors");
5576 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5577 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5578
5579 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5580 NULL, "BGE RX Statistics");
5581 child = SYSCTL_CHILDREN(tree);
5582 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5583 &stats->ifHCInOctets, "Inbound Octets");
5584 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5585 &stats->etherStatsFragments, "Fragments");
5586 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5587 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5588 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5589 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5590 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5591 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5592 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5593 &stats->dot3StatsFCSErrors, "FCS Errors");
5594 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5595 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5596 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5597 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5598 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5599 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5600 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5601 &stats->macControlFramesReceived, "MAC Control Frames Received");
5602 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5603 &stats->xoffStateEntered, "XOFF State Entered");
5604 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5605 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5606 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5607 &stats->etherStatsJabbers, "Jabbers");
5608 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5609 &stats->etherStatsUndersizePkts, "Undersized Packets");
5610
5611 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5612 NULL, "BGE TX Statistics");
5613 child = SYSCTL_CHILDREN(tree);
5614 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5615 &stats->ifHCOutOctets, "Outbound Octets");
5616 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5617 &stats->etherStatsCollisions, "TX Collisions");
5618 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5619 &stats->outXonSent, "XON Sent");
5620 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5621 &stats->outXoffSent, "XOFF Sent");
5622 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5623 &stats->dot3StatsInternalMacTransmitErrors,
5624 "Internal MAC TX Errors");
5625 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5626 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5627 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5628 &stats->dot3StatsMultipleCollisionFrames,
5629 "Multiple Collision Frames");
5630 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5631 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5632 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5633 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5634 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5635 &stats->dot3StatsLateCollisions, "Late Collisions");
5636 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5637 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5638 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5639 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5640 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5641 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5642}
5643
5644#undef BGE_SYSCTL_STAT_ADD64
5645
5646static int
5647bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5648{
5649 struct bge_softc *sc;
5650 uint32_t result;
5651 int offset;
5652
5653 sc = (struct bge_softc *)arg1;
5654 offset = arg2;
5655 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5656 offsetof(bge_hostaddr, bge_addr_lo));
5657 return (sysctl_handle_int(oidp, &result, 0, req));
5658}
5659
5660#ifdef BGE_REGISTER_DEBUG
5661static int
5662bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5663{
5664 struct bge_softc *sc;
5665 uint16_t *sbdata;
5666 int error;
5667 int result;
5668 int i, j;
5669
5670 result = -1;
5671 error = sysctl_handle_int(oidp, &result, 0, req);
5672 if (error || (req->newptr == NULL))
5673 return (error);
5674
5675 if (result == 1) {
5676 sc = (struct bge_softc *)arg1;
5677
5678 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5679 printf("Status Block:\n");
5680 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
5681 printf("%06x:", i);
5682 for (j = 0; j < 8; j++) {
5683 printf(" %04x", sbdata[i]);
5684 i += 4;
5685 }
5686 printf("\n");
5687 }
5688
5689 printf("Registers:\n");
5690 for (i = 0x800; i < 0xA00; ) {
5691 printf("%06x:", i);
5692 for (j = 0; j < 8; j++) {
5693 printf(" %08x", CSR_READ_4(sc, i));
5694 i += 4;
5695 }
5696 printf("\n");
5697 }
5698
5699 printf("Hardware Flags:\n");
5700 if (BGE_IS_5755_PLUS(sc))
5701 printf(" - 5755 Plus\n");
5702 if (BGE_IS_575X_PLUS(sc))
5703 printf(" - 575X Plus\n");
5704 if (BGE_IS_5705_PLUS(sc))
5705 printf(" - 5705 Plus\n");
5706 if (BGE_IS_5714_FAMILY(sc))
5707 printf(" - 5714 Family\n");
5708 if (BGE_IS_5700_FAMILY(sc))
5709 printf(" - 5700 Family\n");
5710 if (sc->bge_flags & BGE_FLAG_JUMBO)
5711 printf(" - Supports Jumbo Frames\n");
5712 if (sc->bge_flags & BGE_FLAG_PCIX)
5713 printf(" - PCI-X Bus\n");
5714 if (sc->bge_flags & BGE_FLAG_PCIE)
5715 printf(" - PCI Express Bus\n");
5716 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5717 printf(" - No 3 LEDs\n");
5718 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5719 printf(" - RX Alignment Bug\n");
5720 }
5721
5722 return (error);
5723}
5724
5725static int
5726bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5727{
5728 struct bge_softc *sc;
5729 int error;
5730 uint16_t result;
5731 uint32_t val;
5732
5733 result = -1;
5734 error = sysctl_handle_int(oidp, &result, 0, req);
5735 if (error || (req->newptr == NULL))
5736 return (error);
5737
5738 if (result < 0x8000) {
5739 sc = (struct bge_softc *)arg1;
5740 val = CSR_READ_4(sc, result);
5741 printf("reg 0x%06X = 0x%08X\n", result, val);
5742 }
5743
5744 return (error);
5745}
5746
5747static int
5748bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5749{
5750 struct bge_softc *sc;
5751 int error;
5752 uint16_t result;
5753 uint32_t val;
5754
5755 result = -1;
5756 error = sysctl_handle_int(oidp, &result, 0, req);
5757 if (error || (req->newptr == NULL))
5758 return (error);
5759
5760 if (result < 0x8000) {
5761 sc = (struct bge_softc *)arg1;
5762 val = bge_readmem_ind(sc, result);
5763 printf("mem 0x%06X = 0x%08X\n", result, val);
5764 }
5765
5766 return (error);
5767}
5768#endif
5769
5770static int
5771bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5772{
5773
5774 if (sc->bge_flags & BGE_FLAG_EADDR)
5775 return (1);
5776
5777#ifdef __sparc64__
5778 OF_getetheraddr(sc->bge_dev, ether_addr);
5779 return (0);
5780#endif
5781 return (1);
5782}
5783
5784static int
5785bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5786{
5787 uint32_t mac_addr;
5788
5789 mac_addr = bge_readmem_ind(sc, 0x0c14);
5790 if ((mac_addr >> 16) == 0x484b) {
5791 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5792 ether_addr[1] = (uint8_t)mac_addr;
5793 mac_addr = bge_readmem_ind(sc, 0x0c18);
5794 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5795 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5796 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5797 ether_addr[5] = (uint8_t)mac_addr;
5798 return (0);
5799 }
5800 return (1);
5801}
5802
5803static int
5804bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5805{
5806 int mac_offset = BGE_EE_MAC_OFFSET;
5807
5808 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5809 mac_offset = BGE_EE_MAC_OFFSET_5906;
5810
5811 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5812 ETHER_ADDR_LEN));
5813}
5814
5815static int
5816bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5817{
5818
5819 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5820 return (1);
5821
5822 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5823 ETHER_ADDR_LEN));
5824}
5825
5826static int
5827bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5828{
5829 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5830 /* NOTE: Order is critical */
5831 bge_get_eaddr_fw,
5832 bge_get_eaddr_mem,
5833 bge_get_eaddr_nvram,
5834 bge_get_eaddr_eeprom,
5835 NULL
5836 };
5837 const bge_eaddr_fcn_t *func;
5838
5839 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5840 if ((*func)(sc, eaddr) == 0)
5841 break;
5842 }
5843 return (*func == NULL ? ENXIO : 0);
5844}
4945 ifp->if_mtu = ifr->ifr_mtu;
4946 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4947 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4948 bge_init_locked(sc);
4949 }
4950 }
4951 BGE_UNLOCK(sc);
4952 break;
4953 case SIOCSIFFLAGS:
4954 BGE_LOCK(sc);
4955 if (ifp->if_flags & IFF_UP) {
4956 /*
4957 * If only the state of the PROMISC flag changed,
4958 * then just use the 'set promisc mode' command
4959 * instead of reinitializing the entire NIC. Doing
4960 * a full re-init means reloading the firmware and
4961 * waiting for it to start up, which may take a
4962 * second or two. Similarly for ALLMULTI.
4963 */
4964 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4965 flags = ifp->if_flags ^ sc->bge_if_flags;
4966 if (flags & IFF_PROMISC)
4967 bge_setpromisc(sc);
4968 if (flags & IFF_ALLMULTI)
4969 bge_setmulti(sc);
4970 } else
4971 bge_init_locked(sc);
4972 } else {
4973 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4974 bge_stop(sc);
4975 }
4976 }
4977 sc->bge_if_flags = ifp->if_flags;
4978 BGE_UNLOCK(sc);
4979 error = 0;
4980 break;
4981 case SIOCADDMULTI:
4982 case SIOCDELMULTI:
4983 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4984 BGE_LOCK(sc);
4985 bge_setmulti(sc);
4986 BGE_UNLOCK(sc);
4987 error = 0;
4988 }
4989 break;
4990 case SIOCSIFMEDIA:
4991 case SIOCGIFMEDIA:
4992 if (sc->bge_flags & BGE_FLAG_TBI) {
4993 error = ifmedia_ioctl(ifp, ifr,
4994 &sc->bge_ifmedia, command);
4995 } else {
4996 mii = device_get_softc(sc->bge_miibus);
4997 error = ifmedia_ioctl(ifp, ifr,
4998 &mii->mii_media, command);
4999 }
5000 break;
5001 case SIOCSIFCAP:
5002 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5003#ifdef DEVICE_POLLING
5004 if (mask & IFCAP_POLLING) {
5005 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5006 error = ether_poll_register(bge_poll, ifp);
5007 if (error)
5008 return (error);
5009 BGE_LOCK(sc);
5010 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5011 BGE_PCIMISCCTL_MASK_PCI_INTR);
5012 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5013 ifp->if_capenable |= IFCAP_POLLING;
5014 BGE_UNLOCK(sc);
5015 } else {
5016 error = ether_poll_deregister(ifp);
5017 /* Enable interrupt even in error case */
5018 BGE_LOCK(sc);
5019 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5020 BGE_PCIMISCCTL_MASK_PCI_INTR);
5021 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5022 ifp->if_capenable &= ~IFCAP_POLLING;
5023 BGE_UNLOCK(sc);
5024 }
5025 }
5026#endif
5027 if ((mask & IFCAP_TXCSUM) != 0 &&
5028 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5029 ifp->if_capenable ^= IFCAP_TXCSUM;
5030 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5031 ifp->if_hwassist |= sc->bge_csum_features;
5032 else
5033 ifp->if_hwassist &= ~sc->bge_csum_features;
5034 }
5035
5036 if ((mask & IFCAP_RXCSUM) != 0 &&
5037 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5038 ifp->if_capenable ^= IFCAP_RXCSUM;
5039
5040 if ((mask & IFCAP_TSO4) != 0 &&
5041 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5042 ifp->if_capenable ^= IFCAP_TSO4;
5043 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5044 ifp->if_hwassist |= CSUM_TSO;
5045 else
5046 ifp->if_hwassist &= ~CSUM_TSO;
5047 }
5048
5049 if (mask & IFCAP_VLAN_MTU) {
5050 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5051 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5052 bge_init(sc);
5053 }
5054
5055 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5056 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5057 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5058 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5059 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5060 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5061 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5062 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5063 BGE_LOCK(sc);
5064 bge_setvlan(sc);
5065 BGE_UNLOCK(sc);
5066 }
5067#ifdef VLAN_CAPABILITIES
5068 VLAN_CAPABILITIES(ifp);
5069#endif
5070 break;
5071 default:
5072 error = ether_ioctl(ifp, command, data);
5073 break;
5074 }
5075
5076 return (error);
5077}
5078
5079static void
5080bge_watchdog(struct bge_softc *sc)
5081{
5082 struct ifnet *ifp;
5083
5084 BGE_LOCK_ASSERT(sc);
5085
5086 if (sc->bge_timer == 0 || --sc->bge_timer)
5087 return;
5088
5089 ifp = sc->bge_ifp;
5090
5091 if_printf(ifp, "watchdog timeout -- resetting\n");
5092
5093 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5094 bge_init_locked(sc);
5095
5096 ifp->if_oerrors++;
5097}
5098
5099/*
5100 * Stop the adapter and free any mbufs allocated to the
5101 * RX and TX lists.
5102 */
5103static void
5104bge_stop(struct bge_softc *sc)
5105{
5106 struct ifnet *ifp;
5107
5108 BGE_LOCK_ASSERT(sc);
5109
5110 ifp = sc->bge_ifp;
5111
5112 callout_stop(&sc->bge_stat_ch);
5113
5114 /* Disable host interrupts. */
5115 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5116 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5117
5118 /*
5119 * Tell firmware we're shutting down.
5120 */
5121 bge_stop_fw(sc);
5122 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5123
5124 /*
5125 * Disable all of the receiver blocks.
5126 */
5127 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5128 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5129 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5130 if (!(BGE_IS_5705_PLUS(sc)))
5131 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5132 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5133 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5134 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5135
5136 /*
5137 * Disable all of the transmit blocks.
5138 */
5139 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5140 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5141 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5142 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5143 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5144 if (!(BGE_IS_5705_PLUS(sc)))
5145 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5146 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5147
5148 /*
5149 * Shut down all of the memory managers and related
5150 * state machines.
5151 */
5152 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5153 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5154 if (!(BGE_IS_5705_PLUS(sc)))
5155 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5156 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5157 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5158 if (!(BGE_IS_5705_PLUS(sc))) {
5159 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5160 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5161 }
5162 /* Update MAC statistics. */
5163 if (BGE_IS_5705_PLUS(sc))
5164 bge_stats_update_regs(sc);
5165
5166 bge_reset(sc);
5167 bge_sig_legacy(sc, BGE_RESET_STOP);
5168 bge_sig_post_reset(sc, BGE_RESET_STOP);
5169
5170 /*
5171 * Keep the ASF firmware running if up.
5172 */
5173 if (sc->bge_asf_mode & ASF_STACKUP)
5174 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5175 else
5176 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5177
5178 /* Free the RX lists. */
5179 bge_free_rx_ring_std(sc);
5180
5181 /* Free jumbo RX list. */
5182 if (BGE_IS_JUMBO_CAPABLE(sc))
5183 bge_free_rx_ring_jumbo(sc);
5184
5185 /* Free TX buffers. */
5186 bge_free_tx_ring(sc);
5187
5188 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5189
5190 /* Clear MAC's link state (PHY may still have link UP). */
5191 if (bootverbose && sc->bge_link)
5192 if_printf(sc->bge_ifp, "link DOWN\n");
5193 sc->bge_link = 0;
5194
5195 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5196}
5197
5198/*
5199 * Stop all chip I/O so that the kernel's probe routines don't
5200 * get confused by errant DMAs when rebooting.
5201 */
5202static int
5203bge_shutdown(device_t dev)
5204{
5205 struct bge_softc *sc;
5206
5207 sc = device_get_softc(dev);
5208 BGE_LOCK(sc);
5209 bge_stop(sc);
5210 bge_reset(sc);
5211 BGE_UNLOCK(sc);
5212
5213 return (0);
5214}
5215
5216static int
5217bge_suspend(device_t dev)
5218{
5219 struct bge_softc *sc;
5220
5221 sc = device_get_softc(dev);
5222 BGE_LOCK(sc);
5223 bge_stop(sc);
5224 BGE_UNLOCK(sc);
5225
5226 return (0);
5227}
5228
5229static int
5230bge_resume(device_t dev)
5231{
5232 struct bge_softc *sc;
5233 struct ifnet *ifp;
5234
5235 sc = device_get_softc(dev);
5236 BGE_LOCK(sc);
5237 ifp = sc->bge_ifp;
5238 if (ifp->if_flags & IFF_UP) {
5239 bge_init_locked(sc);
5240 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5241 bge_start_locked(ifp);
5242 }
5243 BGE_UNLOCK(sc);
5244
5245 return (0);
5246}
5247
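/*
 * Link state handling. Depending on the hardware, the code below uses one
 * of four strategies: MII interrupts for BCM5700 chips whose status-block
 * link bit is unreliable, the PCS sync bit in the MAC status register for
 * TBI (fiber) links, the autopolled MI status register when auto-polling
 * is enabled, or a plain mii_pollstat() otherwise.
 */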
5248static void
5249bge_link_upd(struct bge_softc *sc)
5250{
5251 struct mii_data *mii;
5252 uint32_t link, status;
5253
5254 BGE_LOCK_ASSERT(sc);
5255
5256 /* Clear 'pending link event' flag. */
5257 sc->bge_link_evt = 0;
5258
5259 /*
5260 * Process link state changes.
5261 * Grrr. The link status word in the status block does
5262 * not work correctly on the BCM5700 rev AX and BX chips,
5263 * according to all available information. Hence, we have
5264 * to enable MII interrupts in order to properly obtain
5265 * async link changes. Unfortunately, this also means that
5266 * we have to read the MAC status register to detect link
5267 * changes, thereby adding an additional register access to
5268 * the interrupt handler.
5269 *
5270	 * XXX: perhaps the link state detection procedure used for
5271	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5272 */
5273
5274 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5275 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5276 status = CSR_READ_4(sc, BGE_MAC_STS);
5277 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5278 mii = device_get_softc(sc->bge_miibus);
5279 mii_pollstat(mii);
5280 if (!sc->bge_link &&
5281 mii->mii_media_status & IFM_ACTIVE &&
5282 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5283 sc->bge_link++;
5284 if (bootverbose)
5285 if_printf(sc->bge_ifp, "link UP\n");
5286 } else if (sc->bge_link &&
5287 (!(mii->mii_media_status & IFM_ACTIVE) ||
5288 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5289 sc->bge_link = 0;
5290 if (bootverbose)
5291 if_printf(sc->bge_ifp, "link DOWN\n");
5292 }
5293
5294 /* Clear the interrupt. */
5295 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5296 BGE_EVTENB_MI_INTERRUPT);
5297 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5298 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5299 BRGPHY_INTRS);
5300 }
5301 return;
5302 }
5303
5304 if (sc->bge_flags & BGE_FLAG_TBI) {
5305 status = CSR_READ_4(sc, BGE_MAC_STS);
5306 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5307 if (!sc->bge_link) {
5308 sc->bge_link++;
5309 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5310 BGE_CLRBIT(sc, BGE_MAC_MODE,
5311 BGE_MACMODE_TBI_SEND_CFGS);
5312 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5313 if (bootverbose)
5314 if_printf(sc->bge_ifp, "link UP\n");
5315 if_link_state_change(sc->bge_ifp,
5316 LINK_STATE_UP);
5317 }
5318 } else if (sc->bge_link) {
5319 sc->bge_link = 0;
5320 if (bootverbose)
5321 if_printf(sc->bge_ifp, "link DOWN\n");
5322 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5323 }
5324 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5325 /*
5326		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
5327		 * bit in the status word always set. Work around this bug by
5328		 * reading the PHY link status directly.
5329 */
5330 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5331
5332 if (link != sc->bge_link ||
5333 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5334 mii = device_get_softc(sc->bge_miibus);
5335 mii_pollstat(mii);
5336 if (!sc->bge_link &&
5337 mii->mii_media_status & IFM_ACTIVE &&
5338 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5339 sc->bge_link++;
5340 if (bootverbose)
5341 if_printf(sc->bge_ifp, "link UP\n");
5342 } else if (sc->bge_link &&
5343 (!(mii->mii_media_status & IFM_ACTIVE) ||
5344 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5345 sc->bge_link = 0;
5346 if (bootverbose)
5347 if_printf(sc->bge_ifp, "link DOWN\n");
5348 }
5349 }
5350 } else {
5351 /*
5352 * For controllers that call mii_tick, we have to poll
5353 * link status.
5354 */
5355 mii = device_get_softc(sc->bge_miibus);
5356 mii_pollstat(mii);
5357 bge_miibus_statchg(sc->bge_dev);
5358 }
5359
5360 /* Clear the attention. */
5361 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5362 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5363 BGE_MACSTAT_LINK_CHANGED);
5364}
5365
5366static void
5367bge_add_sysctls(struct bge_softc *sc)
5368{
5369 struct sysctl_ctx_list *ctx;
5370 struct sysctl_oid_list *children;
5371 char tn[32];
5372 int unit;
5373
5374 ctx = device_get_sysctl_ctx(sc->bge_dev);
5375 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5376
5377#ifdef BGE_REGISTER_DEBUG
5378 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5379 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5380 "Debug Information");
5381
5382 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5383 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5384 "Register Read");
5385
5386 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5387 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5388 "Memory Read");
5389
5390#endif
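	/*
	 * Illustrative usage of the debug sysctls above when the driver is
	 * built with BGE_REGISTER_DEBUG (a sketch only; the unit number and
	 * offsets are arbitrary examples):
	 *
	 *	sysctl dev.bge.0.debug_info=1	# dump status block, registers, flags
	 *	sysctl dev.bge.0.reg_read=0x400	# print one register (offsets below 0x8000)
	 *	sysctl dev.bge.0.mem_read=0xc14	# print one word of NIC-local memory
	 */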
5391
5392 unit = device_get_unit(sc->bge_dev);
5393 /*
5394 * A common design characteristic for many Broadcom client controllers
5395 * is that they only support a single outstanding DMA read operation
5396 * on the PCIe bus. This means that it will take twice as long to fetch
5397 * a TX frame that is split into header and payload buffers as it does
5398 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5399	 * these controllers, coalescing buffers to reduce the number of memory
5400	 * reads is an effective way to get maximum performance (about 940 Mbps).
5401	 * Without collapsing TX buffers, the maximum TCP bulk transfer
5402	 * performance is about 850 Mbps. However, forcing mbuf coalescing
5403	 * consumes a lot of CPU cycles, so it is left off by default.
5404 */
5405 sc->bge_forced_collapse = 0;
5406 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5407 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5408 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5409 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5410 "Number of fragmented TX buffers of a frame allowed before "
5411 "forced collapsing");
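	/*
	 * For illustration (an assumed example, not part of the driver): the
	 * knob above can be set as a boot-time tunable or changed at run time
	 * for, say, unit 0:
	 *
	 *	# /boot/loader.conf
	 *	dev.bge.0.forced_collapse="2"
	 *
	 *	# at run time
	 *	sysctl dev.bge.0.forced_collapse=2
	 *
	 * Per the description above, 0 (the default) leaves forced collapsing
	 * off and a positive value is the number of fragmented TX buffers of
	 * a frame allowed before the chain is collapsed.
	 */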
5412
5413 /*
5414 * It seems all Broadcom controllers have a bug that can generate UDP
5415 * datagrams with checksum value 0 when TX UDP checksum offloading is
5416	 * enabled. Generating a UDP checksum value of 0 violates RFC 768.
5417	 * Even though the probability of generating such UDP datagrams is
5418	 * low, I don't want FreeBSD boxes to inject such datagrams into the
5419	 * network, so UDP checksum offloading is disabled by default. Users
5420	 * can still override this behavior by setting a sysctl variable,
5421 * dev.bge.0.forced_udpcsum.
5422 */
5423 sc->bge_forced_udpcsum = 0;
5424 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5425 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5426 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5427 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5428 "Enable UDP checksum offloading even if controller can "
5429 "generate UDP checksum value 0");
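	/*
	 * For illustration (an assumed example, not part of the driver): as
	 * created above, the run-time knob for unit 0 is
	 * dev.bge.0.forced_udpcsum, while the boot-time tunable string
	 * fetched above is spelled dev.bge.0.bge_forced_udpcsum:
	 *
	 *	# at run time
	 *	sysctl dev.bge.0.forced_udpcsum=1
	 *
	 *	# /boot/loader.conf
	 *	dev.bge.0.bge_forced_udpcsum="1"
	 *
	 * Per the description above, a non-zero value enables TX UDP checksum
	 * offloading despite the checksum-0 bug.
	 */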
5430
5431 if (BGE_IS_5705_PLUS(sc))
5432 bge_add_sysctl_stats_regs(sc, ctx, children);
5433 else
5434 bge_add_sysctl_stats(sc, ctx, children);
5435}
5436
5437#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5438 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5439 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5440 desc)
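/*
 * For reference, a sketch of what BGE_SYSCTL_STAT expands to, assuming
 * straightforward substitution (the invocation shown is one of those used
 * below):
 *
 *	BGE_SYSCTL_STAT(sc, ctx, "Input Errors", children, ifInErrors,
 *	    "InputErrors");
 * becomes
 *	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "InputErrors",
 *	    CTLTYPE_UINT | CTLFLAG_RD, sc,
 *	    offsetof(struct bge_stats, ifInErrors), bge_sysctl_stats, "IU",
 *	    "Input Errors");
 *
 * Each statistic is thus a read-only unsigned integer node serviced by
 * bge_sysctl_stats(), which receives the field's offset within the
 * statistics block as arg2.
 */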
5441
5442static void
5443bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5444 struct sysctl_oid_list *parent)
5445{
5446 struct sysctl_oid *tree;
5447 struct sysctl_oid_list *children, *schildren;
5448
5449 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5450 NULL, "BGE Statistics");
5451 schildren = children = SYSCTL_CHILDREN(tree);
5452 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5453 children, COSFramesDroppedDueToFilters,
5454 "FramesDroppedDueToFilters");
5455 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5456 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5457 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5458 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5459 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5460 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5461 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5462 children, ifInDiscards, "InputDiscards");
5463 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5464 children, ifInErrors, "InputErrors");
5465 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5466 children, nicRecvThresholdHit, "RecvThresholdHit");
5467 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5468 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5469 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5470 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5471 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5472 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5473 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5474 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5475 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5476 children, nicRingStatusUpdate, "RingStatusUpdate");
5477 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5478 children, nicInterrupts, "Interrupts");
5479 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5480 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5481 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5482 children, nicSendThresholdHit, "SendThresholdHit");
5483
5484 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5485 NULL, "BGE RX Statistics");
5486 children = SYSCTL_CHILDREN(tree);
5487 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5488 children, rxstats.ifHCInOctets, "ifHCInOctets");
5489 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5490 children, rxstats.etherStatsFragments, "Fragments");
5491 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5492 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5493 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5494 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5495 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5496 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5497 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5498 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5499 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5500 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5501 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5502 children, rxstats.xoffPauseFramesReceived,
5503 "xoffPauseFramesReceived");
5504 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5505 children, rxstats.macControlFramesReceived,
5506 "ControlFramesReceived");
5507 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5508 children, rxstats.xoffStateEntered, "xoffStateEntered");
5509 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5510 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5511 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5512 children, rxstats.etherStatsJabbers, "Jabbers");
5513 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5514 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5515 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5516 children, rxstats.inRangeLengthError, "inRangeLengthError");
5517 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5518 children, rxstats.outRangeLengthError, "outRangeLengthError");
5519
5520 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5521 NULL, "BGE TX Statistics");
5522 children = SYSCTL_CHILDREN(tree);
5523 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5524 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5525 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5526 children, txstats.etherStatsCollisions, "Collisions");
5527 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5528 children, txstats.outXonSent, "XonSent");
5529 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5530 children, txstats.outXoffSent, "XoffSent");
5531 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5532 children, txstats.flowControlDone, "flowControlDone");
5533 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5534 children, txstats.dot3StatsInternalMacTransmitErrors,
5535 "InternalMacTransmitErrors");
5536 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5537 children, txstats.dot3StatsSingleCollisionFrames,
5538 "SingleCollisionFrames");
5539 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5540 children, txstats.dot3StatsMultipleCollisionFrames,
5541 "MultipleCollisionFrames");
5542 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5543 children, txstats.dot3StatsDeferredTransmissions,
5544 "DeferredTransmissions");
5545 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5546 children, txstats.dot3StatsExcessiveCollisions,
5547 "ExcessiveCollisions");
5548 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5549 children, txstats.dot3StatsLateCollisions,
5550 "LateCollisions");
5551 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5552 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5553 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5554 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5555 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5556 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5557 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5558 children, txstats.dot3StatsCarrierSenseErrors,
5559 "CarrierSenseErrors");
5560 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5561 children, txstats.ifOutDiscards, "Discards");
5562 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5563 children, txstats.ifOutErrors, "Errors");
5564}
5565
5566#undef BGE_SYSCTL_STAT
5567
5568#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5569 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
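/*
 * Likewise, a sketch of the expansion of BGE_SYSCTL_STAT_ADD64, assuming
 * direct substitution:
 *
 *	BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
 *	    &stats->InputErrors, "Input Errors");
 * becomes
 *	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "InputErrors", CTLFLAG_RD,
 *	    &stats->InputErrors, "Input Errors");
 *
 * These nodes export 64-bit counters kept in the softc (bge_mac_stats)
 * rather than going through a register-read handler.
 */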
5570
5571static void
5572bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5573 struct sysctl_oid_list *parent)
5574{
5575 struct sysctl_oid *tree;
5576 struct sysctl_oid_list *child, *schild;
5577 struct bge_mac_stats *stats;
5578
5579 stats = &sc->bge_mac_stats;
5580 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5581 NULL, "BGE Statistics");
5582 schild = child = SYSCTL_CHILDREN(tree);
5583 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5584 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5585 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5586 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5587 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5588 &stats->DmaWriteHighPriQueueFull,
5589 "NIC DMA Write High Priority Queue Full");
5590 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5591 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5592 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5593 &stats->InputDiscards, "Discarded Input Frames");
5594 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5595 &stats->InputErrors, "Input Errors");
5596 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5597 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5598
5599 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5600 NULL, "BGE RX Statistics");
5601 child = SYSCTL_CHILDREN(tree);
5602 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5603 &stats->ifHCInOctets, "Inbound Octets");
5604 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5605 &stats->etherStatsFragments, "Fragments");
5606 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5607 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5608 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5609 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5610 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5611 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5612 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5613 &stats->dot3StatsFCSErrors, "FCS Errors");
5614 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5615 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5616 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5617 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5618 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5619 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5620 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5621 &stats->macControlFramesReceived, "MAC Control Frames Received");
5622 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5623 &stats->xoffStateEntered, "XOFF State Entered");
5624 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5625 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5626 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5627 &stats->etherStatsJabbers, "Jabbers");
5628 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5629 &stats->etherStatsUndersizePkts, "Undersized Packets");
5630
5631 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5632 NULL, "BGE TX Statistics");
5633 child = SYSCTL_CHILDREN(tree);
5634 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5635 &stats->ifHCOutOctets, "Outbound Octets");
5636 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5637 &stats->etherStatsCollisions, "TX Collisions");
5638 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5639 &stats->outXonSent, "XON Sent");
5640 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5641 &stats->outXoffSent, "XOFF Sent");
5642 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5643 &stats->dot3StatsInternalMacTransmitErrors,
5644 "Internal MAC TX Errors");
5645 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5646 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5647 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5648 &stats->dot3StatsMultipleCollisionFrames,
5649 "Multiple Collision Frames");
5650 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5651 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5652 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5653 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5654 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5655 &stats->dot3StatsLateCollisions, "Late Collisions");
5656 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5657 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5658 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5659 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5660 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5661 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5662}
5663
5664#undef BGE_SYSCTL_STAT_ADD64
5665
5666static int
5667bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5668{
5669 struct bge_softc *sc;
5670 uint32_t result;
5671 int offset;
5672
5673 sc = (struct bge_softc *)arg1;
5674 offset = arg2;
5675 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5676 offsetof(bge_hostaddr, bge_addr_lo));
5677 return (sysctl_handle_int(oidp, &result, 0, req));
5678}
5679
5680#ifdef BGE_REGISTER_DEBUG
5681static int
5682bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5683{
5684 struct bge_softc *sc;
5685 uint16_t *sbdata;
5686 int error;
5687 int result;
5688 int i, j;
5689
5690 result = -1;
5691 error = sysctl_handle_int(oidp, &result, 0, req);
5692 if (error || (req->newptr == NULL))
5693 return (error);
5694
5695 if (result == 1) {
5696 sc = (struct bge_softc *)arg1;
5697
5698 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5699 printf("Status Block:\n");
5700 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
5701 printf("%06x:", i);
5702 for (j = 0; j < 8; j++) {
5703 printf(" %04x", sbdata[i]);
5704 i += 4;
5705 }
5706 printf("\n");
5707 }
5708
5709 printf("Registers:\n");
5710 for (i = 0x800; i < 0xA00; ) {
5711 printf("%06x:", i);
5712 for (j = 0; j < 8; j++) {
5713 printf(" %08x", CSR_READ_4(sc, i));
5714 i += 4;
5715 }
5716 printf("\n");
5717 }
5718
5719 printf("Hardware Flags:\n");
5720 if (BGE_IS_5755_PLUS(sc))
5721 printf(" - 5755 Plus\n");
5722 if (BGE_IS_575X_PLUS(sc))
5723 printf(" - 575X Plus\n");
5724 if (BGE_IS_5705_PLUS(sc))
5725 printf(" - 5705 Plus\n");
5726 if (BGE_IS_5714_FAMILY(sc))
5727 printf(" - 5714 Family\n");
5728 if (BGE_IS_5700_FAMILY(sc))
5729 printf(" - 5700 Family\n");
5730 if (sc->bge_flags & BGE_FLAG_JUMBO)
5731 printf(" - Supports Jumbo Frames\n");
5732 if (sc->bge_flags & BGE_FLAG_PCIX)
5733 printf(" - PCI-X Bus\n");
5734 if (sc->bge_flags & BGE_FLAG_PCIE)
5735 printf(" - PCI Express Bus\n");
5736 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5737 printf(" - No 3 LEDs\n");
5738 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5739 printf(" - RX Alignment Bug\n");
5740 }
5741
5742 return (error);
5743}
5744
5745static int
5746bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5747{
5748 struct bge_softc *sc;
5749 int error;
5750 uint16_t result;
5751 uint32_t val;
5752
5753 result = -1;
5754 error = sysctl_handle_int(oidp, &result, 0, req);
5755 if (error || (req->newptr == NULL))
5756 return (error);
5757
5758 if (result < 0x8000) {
5759 sc = (struct bge_softc *)arg1;
5760 val = CSR_READ_4(sc, result);
5761 printf("reg 0x%06X = 0x%08X\n", result, val);
5762 }
5763
5764 return (error);
5765}
5766
5767static int
5768bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5769{
5770 struct bge_softc *sc;
5771 int error;
5772 uint16_t result;
5773 uint32_t val;
5774
5775 result = -1;
5776 error = sysctl_handle_int(oidp, &result, 0, req);
5777 if (error || (req->newptr == NULL))
5778 return (error);
5779
5780 if (result < 0x8000) {
5781 sc = (struct bge_softc *)arg1;
5782 val = bge_readmem_ind(sc, result);
5783 printf("mem 0x%06X = 0x%08X\n", result, val);
5784 }
5785
5786 return (error);
5787}
5788#endif
5789
5790static int
5791bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5792{
5793
5794 if (sc->bge_flags & BGE_FLAG_EADDR)
5795 return (1);
5796
5797#ifdef __sparc64__
5798 OF_getetheraddr(sc->bge_dev, ether_addr);
5799 return (0);
5800#endif
5801 return (1);
5802}
5803
5804static int
5805bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5806{
5807 uint32_t mac_addr;
5808
5809 mac_addr = bge_readmem_ind(sc, 0x0c14);
5810 if ((mac_addr >> 16) == 0x484b) {
5811 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5812 ether_addr[1] = (uint8_t)mac_addr;
5813 mac_addr = bge_readmem_ind(sc, 0x0c18);
5814 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5815 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5816 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5817 ether_addr[5] = (uint8_t)mac_addr;
5818 return (0);
5819 }
5820 return (1);
5821}
5822
5823static int
5824bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5825{
5826 int mac_offset = BGE_EE_MAC_OFFSET;
5827
5828 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5829 mac_offset = BGE_EE_MAC_OFFSET_5906;
5830
5831 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5832 ETHER_ADDR_LEN));
5833}
5834
5835static int
5836bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5837{
5838
5839 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5840 return (1);
5841
5842 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5843 ETHER_ADDR_LEN));
5844}
5845
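/*
 * Station address discovery: bge_get_eaddr() below tries each source in
 * bge_eaddr_funcs[] in order (firmware/Open Firmware property, NIC shared
 * memory, NVRAM, then EEPROM) and uses the first one that succeeds,
 * returning ENXIO if none does.
 */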
5846static int
5847bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5848{
5849 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5850 /* NOTE: Order is critical */
5851 bge_get_eaddr_fw,
5852 bge_get_eaddr_mem,
5853 bge_get_eaddr_nvram,
5854 bge_get_eaddr_eeprom,
5855 NULL
5856 };
5857 const bge_eaddr_fcn_t *func;
5858
5859 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5860 if ((*func)(sc, eaddr) == 0)
5861 break;
5862 }
5863 return (*func == NULL ? ENXIO : 0);
5864}