if_bge.c (214216) if_bge.c (214219)
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 214216 2010-10-22 18:31:44Z yongari $");
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 214219 2010-10-22 19:30:56Z yongari $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
218
219 { SK_VENDORID, SK_DEVICEID_ALTIMA },
220
221 { TC_VENDORID, TC_DEVICEID_3C996 },
222
223 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
224 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
225 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
226
227 { 0, 0 }
228};
229
230static const struct bge_vendor {
231 uint16_t v_id;
232 const char *v_name;
233} bge_vendors[] = {
234 { ALTEON_VENDORID, "Alteon" },
235 { ALTIMA_VENDORID, "Altima" },
236 { APPLE_VENDORID, "Apple" },
237 { BCOM_VENDORID, "Broadcom" },
238 { SK_VENDORID, "SysKonnect" },
239 { TC_VENDORID, "3Com" },
240 { FJTSU_VENDORID, "Fujitsu" },
241
242 { 0, NULL }
243};
244
245static const struct bge_revision {
246 uint32_t br_chipid;
247 const char *br_name;
248} bge_revisions[] = {
249 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
250 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
251 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
252 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
253 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
254 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
255 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
256 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
257 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
258 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
259 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
260 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
261 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
262 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
263 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
264 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
265 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
266 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
267 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
268 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
269 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
270 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
271 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
272 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
273 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
274 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
275 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
276 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
277 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
278 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
279 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
280 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
281 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
282 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
283 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
284 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
285 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
286 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
287 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
288 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
289 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
290 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
291 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
292 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
293 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
294 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
295 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
296 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
297 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
298 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
299 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
300 /* 5754 and 5787 share the same ASIC ID */
301 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
302 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
303 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
304 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
305 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
306 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
307 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
308
309 { 0, NULL }
310};
311
312/*
313 * Some defaults for major revisions, so that newer steppings
314 * that we don't know about have a shot at working.
315 */
316static const struct bge_revision bge_majorrevs[] = {
317 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
318 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
319 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
320 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
321 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
322 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
323 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
324 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
325 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
326 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
327 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
328 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
329 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
330 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
331 /* 5754 and 5787 share the same ASIC ID */
332 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
333 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
334 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
335
336 { 0, NULL }
337};
338
339#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
340#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
341#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
342#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
343#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
344#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
345
346const struct bge_revision * bge_lookup_rev(uint32_t);
347const struct bge_vendor * bge_lookup_vendor(uint16_t);
348
349typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
350
351static int bge_probe(device_t);
352static int bge_attach(device_t);
353static int bge_detach(device_t);
354static int bge_suspend(device_t);
355static int bge_resume(device_t);
356static void bge_release_resources(struct bge_softc *);
357static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
358static int bge_dma_alloc(struct bge_softc *);
359static void bge_dma_free(struct bge_softc *);
360static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
361 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
362
363static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
364static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
365static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
366static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
367static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
368
369static void bge_txeof(struct bge_softc *, uint16_t);
370static int bge_rxeof(struct bge_softc *, uint16_t, int);
371
372static void bge_asf_driver_up (struct bge_softc *);
373static void bge_tick(void *);
374static void bge_stats_clear_regs(struct bge_softc *);
375static void bge_stats_update(struct bge_softc *);
376static void bge_stats_update_regs(struct bge_softc *);
377static struct mbuf *bge_check_short_dma(struct mbuf *);
378static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
379 uint16_t *);
380static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
381
382static void bge_intr(void *);
383static int bge_msi_intr(void *);
384static void bge_intr_task(void *, int);
385static void bge_start_locked(struct ifnet *);
386static void bge_start(struct ifnet *);
387static int bge_ioctl(struct ifnet *, u_long, caddr_t);
388static void bge_init_locked(struct bge_softc *);
389static void bge_init(void *);
390static void bge_stop(struct bge_softc *);
391static void bge_watchdog(struct bge_softc *);
392static int bge_shutdown(device_t);
393static int bge_ifmedia_upd_locked(struct ifnet *);
394static int bge_ifmedia_upd(struct ifnet *);
395static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
396
397static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
398static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
399
400static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
401static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
402
403static void bge_setpromisc(struct bge_softc *);
404static void bge_setmulti(struct bge_softc *);
405static void bge_setvlan(struct bge_softc *);
406
407static __inline void bge_rxreuse_std(struct bge_softc *, int);
408static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
409static int bge_newbuf_std(struct bge_softc *, int);
410static int bge_newbuf_jumbo(struct bge_softc *, int);
411static int bge_init_rx_ring_std(struct bge_softc *);
412static void bge_free_rx_ring_std(struct bge_softc *);
413static int bge_init_rx_ring_jumbo(struct bge_softc *);
414static void bge_free_rx_ring_jumbo(struct bge_softc *);
415static void bge_free_tx_ring(struct bge_softc *);
416static int bge_init_tx_ring(struct bge_softc *);
417
418static int bge_chipinit(struct bge_softc *);
419static int bge_blockinit(struct bge_softc *);
420
421static int bge_has_eaddr(struct bge_softc *);
422static uint32_t bge_readmem_ind(struct bge_softc *, int);
423static void bge_writemem_ind(struct bge_softc *, int, int);
424static void bge_writembx(struct bge_softc *, int, int);
425#ifdef notdef
426static uint32_t bge_readreg_ind(struct bge_softc *, int);
427#endif
428static void bge_writemem_direct(struct bge_softc *, int, int);
429static void bge_writereg_ind(struct bge_softc *, int, int);
430
431static int bge_miibus_readreg(device_t, int, int);
432static int bge_miibus_writereg(device_t, int, int, int);
433static void bge_miibus_statchg(device_t);
434#ifdef DEVICE_POLLING
435static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
436#endif
437
438#define BGE_RESET_START 1
439#define BGE_RESET_STOP 2
440static void bge_sig_post_reset(struct bge_softc *, int);
441static void bge_sig_legacy(struct bge_softc *, int);
442static void bge_sig_pre_reset(struct bge_softc *, int);
443static void bge_stop_fw(struct bge_softc *);
444static int bge_reset(struct bge_softc *);
445static void bge_link_upd(struct bge_softc *);
446
447/*
448 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
449 * leak information to untrusted users. It is also known to cause alignment
450 * traps on certain architectures.
451 */
452#ifdef BGE_REGISTER_DEBUG
453static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
454static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
455static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
456#endif
457static void bge_add_sysctls(struct bge_softc *);
458static void bge_add_sysctl_stats_regs(struct bge_softc *,
459 struct sysctl_ctx_list *, struct sysctl_oid_list *);
460static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
461 struct sysctl_oid_list *);
462static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
463
464static device_method_t bge_methods[] = {
465 /* Device interface */
466 DEVMETHOD(device_probe, bge_probe),
467 DEVMETHOD(device_attach, bge_attach),
468 DEVMETHOD(device_detach, bge_detach),
469 DEVMETHOD(device_shutdown, bge_shutdown),
470 DEVMETHOD(device_suspend, bge_suspend),
471 DEVMETHOD(device_resume, bge_resume),
472
473 /* bus interface */
474 DEVMETHOD(bus_print_child, bus_generic_print_child),
475 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
476
477 /* MII interface */
478 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
479 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
480 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
481
482 { 0, 0 }
483};
484
485static driver_t bge_driver = {
486 "bge",
487 bge_methods,
488 sizeof(struct bge_softc)
489};
490
491static devclass_t bge_devclass;
492
493DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
494DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
495
496static int bge_allow_asf = 1;
497
498TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
499
500SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
501SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
502 "Allow ASF mode if available");
503
504#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
505#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
506#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
507#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
508#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
509
510static int
511bge_has_eaddr(struct bge_softc *sc)
512{
513#ifdef __sparc64__
514 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
515 device_t dev;
516 uint32_t subvendor;
517
518 dev = sc->bge_dev;
519
520 /*
521 * The on-board BGEs found in sun4u machines aren't fitted with
522 * an EEPROM which means that we have to obtain the MAC address
523 * via OFW and that some tests will always fail. We distinguish
524 * such BGEs by the subvendor ID, which also has to be obtained
525 * from OFW instead of the PCI configuration space as the latter
526 * indicates Broadcom as the subvendor of the netboot interface.
527 * For early Blade 1500 and 2500 we even have to check the OFW
528 * device path as the subvendor ID always defaults to Broadcom
529 * there.
530 */
531 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
532 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
533 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
534 return (0);
535 memset(buf, 0, sizeof(buf));
536 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
537 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
538 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
539 return (0);
540 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
541 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
542 return (0);
543 }
544#endif
545 return (1);
546}
547
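/*
 * Read a 32-bit word of NIC internal memory indirectly, by pointing the
 * PCI memory window base register at the target offset and reading the
 * data through the window. The window is reset to zero afterwards.
 */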
548static uint32_t
549bge_readmem_ind(struct bge_softc *sc, int off)
550{
551 device_t dev;
552 uint32_t val;
553
554 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
555 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
556 return (0);
557
558 dev = sc->bge_dev;
559
560 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
561 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
562 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
563 return (val);
564}
565
566static void
567bge_writemem_ind(struct bge_softc *sc, int off, int val)
568{
569 device_t dev;
570
571 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
572 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
573 return;
574
575 dev = sc->bge_dev;
576
577 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
578 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
579 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
580}
581
582#ifdef notdef
583static uint32_t
584bge_readreg_ind(struct bge_softc *sc, int off)
585{
586 device_t dev;
587
588 dev = sc->bge_dev;
589
590 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
591 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
592}
593#endif
594
595static void
596bge_writereg_ind(struct bge_softc *sc, int off, int val)
597{
598 device_t dev;
599
600 dev = sc->bge_dev;
601
602 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
603 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
604}
605
606static void
607bge_writemem_direct(struct bge_softc *sc, int off, int val)
608{
609 CSR_WRITE_4(sc, off, val);
610}
611
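/*
 * Write a value to one of the host/NIC mailbox registers. The BCM5906
 * exposes its mailboxes at a different register offset, so the address
 * is translated for that chip.
 */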
612static void
613bge_writembx(struct bge_softc *sc, int off, int val)
614{
615 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
616 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
617
618 CSR_WRITE_4(sc, off, val);
619}
620
621/*
622 * Map a single buffer address.
623 */
624
625static void
626bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
627{
628 struct bge_dmamap_arg *ctx;
629
630 if (error)
631 return;
632
633 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
634
635 ctx = arg;
636 ctx->bge_busaddr = segs->ds_addr;
637}
638
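/*
 * Read a byte of data stored in NVRAM at address 'addr'. The software
 * arbitration semaphore is taken around the access. Only the BCM5906
 * uses this interface (see bge_read_nvram()).
 */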
639static uint8_t
640bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
641{
642 uint32_t access, byte = 0;
643 int i;
644
645 /* Lock. */
646 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
647 for (i = 0; i < 8000; i++) {
648 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
649 break;
650 DELAY(20);
651 }
652 if (i == 8000)
653 return (1);
654
655 /* Enable access. */
656 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
657 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
658
659 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
660 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
661 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
662 DELAY(10);
663 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
664 DELAY(10);
665 break;
666 }
667 }
668
669 if (i == BGE_TIMEOUT * 10) {
670 if_printf(sc->bge_ifp, "nvram read timed out\n");
671 return (1);
672 }
673
674 /* Get result. */
675 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
676
677 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
678
679 /* Disable access. */
680 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
681
682 /* Unlock. */
683 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
684 CSR_READ_4(sc, BGE_NVRAM_SWARB);
685
686 return (0);
687}
688
689/*
690 * Read a sequence of bytes from NVRAM.
691 */
692static int
693bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
694{
695 int err = 0, i;
696 uint8_t byte = 0;
697
698 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
699 return (1);
700
701 for (i = 0; i < cnt; i++) {
702 err = bge_nvram_getbyte(sc, off + i, &byte);
703 if (err)
704 break;
705 *(dest + i) = byte;
706 }
707
708 return (err ? 1 : 0);
709}
710
711/*
712 * Read a byte of data stored in the EEPROM at address 'addr.' The
713 * BCM570x supports both the traditional bitbang interface and an
714 * auto access interface for reading the EEPROM. We use the auto
715 * access method.
716 */
717static uint8_t
718bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
719{
720 int i;
721 uint32_t byte = 0;
722
723 /*
724 * Enable use of auto EEPROM access so we can avoid
725 * having to use the bitbang method.
726 */
727 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
728
729 /* Reset the EEPROM, load the clock period. */
730 CSR_WRITE_4(sc, BGE_EE_ADDR,
731 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
732 DELAY(20);
733
734 /* Issue the read EEPROM command. */
735 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
736
737 /* Wait for completion */
738 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
739 DELAY(10);
740 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
741 break;
742 }
743
744 if (i == BGE_TIMEOUT * 10) {
745 device_printf(sc->bge_dev, "EEPROM read timed out\n");
746 return (1);
747 }
748
749 /* Get result. */
750 byte = CSR_READ_4(sc, BGE_EE_DATA);
751
752 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
753
754 return (0);
755}
756
757/*
758 * Read a sequence of bytes from the EEPROM.
759 */
760static int
761bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
762{
763 int i, error = 0;
764 uint8_t byte = 0;
765
766 for (i = 0; i < cnt; i++) {
767 error = bge_eeprom_getbyte(sc, off + i, &byte);
768 if (error)
769 break;
770 *(dest + i) = byte;
771 }
772
773 return (error ? 1 : 0);
774}
775
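/*
 * Read a PHY register through the MI communication register.
 * Autopolling is temporarily disabled around the access, since a
 * concurrent autopoll cycle may trigger PCI errors.
 */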
776static int
777bge_miibus_readreg(device_t dev, int phy, int reg)
778{
779 struct bge_softc *sc;
780 uint32_t val;
781 int i;
782
783 sc = device_get_softc(dev);
784
785 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
786 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
787 CSR_WRITE_4(sc, BGE_MI_MODE,
788 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
789 DELAY(80);
790 }
791
792 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
793 BGE_MIPHY(phy) | BGE_MIREG(reg));
794
795 /* Poll for the PHY register access to complete. */
796 for (i = 0; i < BGE_TIMEOUT; i++) {
797 DELAY(10);
798 val = CSR_READ_4(sc, BGE_MI_COMM);
799 if ((val & BGE_MICOMM_BUSY) == 0) {
800 DELAY(5);
801 val = CSR_READ_4(sc, BGE_MI_COMM);
802 break;
803 }
804 }
805
806 if (i == BGE_TIMEOUT) {
807 device_printf(sc->bge_dev,
808 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
809 phy, reg, val);
810 val = 0;
811 }
812
813 /* Restore the autopoll bit if necessary. */
814 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
815 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
816 DELAY(80);
817 }
818
819 if (val & BGE_MICOMM_READFAIL)
820 return (0);
821
822 return (val & 0xFFFF);
823}
824
825static int
826bge_miibus_writereg(device_t dev, int phy, int reg, int val)
827{
828 struct bge_softc *sc;
829 int i;
830
831 sc = device_get_softc(dev);
832
833 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
834 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
835 return (0);
836
837 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
838 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
839 CSR_WRITE_4(sc, BGE_MI_MODE,
840 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
841 DELAY(80);
842 }
843
844 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
845 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
846
847 for (i = 0; i < BGE_TIMEOUT; i++) {
848 DELAY(10);
849 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
850 DELAY(5);
851 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
852 break;
853 }
854 }
855
856 /* Restore the autopoll bit if necessary. */
857 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
858 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
859 DELAY(80);
860 }
861
862 if (i == BGE_TIMEOUT)
863 device_printf(sc->bge_dev,
864 "PHY write timed out (phy %d, reg %d, val %d)\n",
865 phy, reg, val);
866
867 return (0);
868}
869
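/*
 * Handle a link state change reported by the PHY: record whether the
 * link is up and, if so, reprogram the MAC for the negotiated port
 * mode, duplex and flow-control settings.
 */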
870static void
871bge_miibus_statchg(device_t dev)
872{
873 struct bge_softc *sc;
874 struct mii_data *mii;
875 sc = device_get_softc(dev);
876 mii = device_get_softc(sc->bge_miibus);
877
878 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
879 (IFM_ACTIVE | IFM_AVALID)) {
880 switch (IFM_SUBTYPE(mii->mii_media_active)) {
881 case IFM_10_T:
882 case IFM_100_TX:
883 sc->bge_link = 1;
884 break;
885 case IFM_1000_T:
886 case IFM_1000_SX:
887 case IFM_2500_SX:
888 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
889 sc->bge_link = 1;
890 else
891 sc->bge_link = 0;
892 break;
893 default:
894 sc->bge_link = 0;
895 break;
896 }
897 } else
898 sc->bge_link = 0;
899 if (sc->bge_link == 0)
900 return;
901 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
902 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
903 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
904 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
905 else
906 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
907
908	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
909 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
910 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FLAG1)
911 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
912 else
913 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
914 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FLAG0)
915 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
916 else
917 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
918 } else {
919 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
920 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
921 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
922 }
923}
924
925/*
926 * Initialize a standard receive ring descriptor.
927 */
928static int
929bge_newbuf_std(struct bge_softc *sc, int i)
930{
931 struct mbuf *m;
932 struct bge_rx_bd *r;
933 bus_dma_segment_t segs[1];
934 bus_dmamap_t map;
935 int error, nsegs;
936
937 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
938 if (m == NULL)
939 return (ENOBUFS);
940 m->m_len = m->m_pkthdr.len = MCLBYTES;
941 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
942 m_adj(m, ETHER_ALIGN);
943
944 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
945 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
946 if (error != 0) {
947 m_freem(m);
948 return (error);
949 }
950 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
951 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
952 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
953 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
954 sc->bge_cdata.bge_rx_std_dmamap[i]);
955 }
956 map = sc->bge_cdata.bge_rx_std_dmamap[i];
957 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
958 sc->bge_cdata.bge_rx_std_sparemap = map;
959 sc->bge_cdata.bge_rx_std_chain[i] = m;
960 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
961 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
962 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
963 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
964 r->bge_flags = BGE_RXBDFLAG_END;
965 r->bge_len = segs[0].ds_len;
966 r->bge_idx = i;
967
968 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
969 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
970
971 return (0);
972}
973
974/*
975 * Initialize a jumbo receive ring descriptor. This allocates
976 * a jumbo buffer from the pool managed internally by the driver.
977 */
978static int
979bge_newbuf_jumbo(struct bge_softc *sc, int i)
980{
981 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
982 bus_dmamap_t map;
983 struct bge_extrx_bd *r;
984 struct mbuf *m;
985 int error, nsegs;
986
987 MGETHDR(m, M_DONTWAIT, MT_DATA);
988 if (m == NULL)
989 return (ENOBUFS);
990
991 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
992 if (!(m->m_flags & M_EXT)) {
993 m_freem(m);
994 return (ENOBUFS);
995 }
996 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
997 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
998 m_adj(m, ETHER_ALIGN);
999
1000 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1001 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1002 if (error != 0) {
1003 m_freem(m);
1004 return (error);
1005 }
1006
1007	if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1008 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1009 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1010 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1011 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1012 }
1013 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1014 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1015 sc->bge_cdata.bge_rx_jumbo_sparemap;
1016 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1017 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1018 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1019 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1020 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1021 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1022
1023 /*
1024 * Fill in the extended RX buffer descriptor.
1025 */
1026 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1027 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1028 r->bge_idx = i;
1029 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1030 switch (nsegs) {
1031 case 4:
1032 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1033 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1034 r->bge_len3 = segs[3].ds_len;
1035 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1036 case 3:
1037 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1038 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1039 r->bge_len2 = segs[2].ds_len;
1040 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1041 case 2:
1042 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1043 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1044 r->bge_len1 = segs[1].ds_len;
1045 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1046 case 1:
1047 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1048 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1049 r->bge_len0 = segs[0].ds_len;
1050 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1051 break;
1052 default:
1053 panic("%s: %d segments\n", __func__, nsegs);
1054 }
1055
1056 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1057 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1058
1059 return (0);
1060}
1061
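/*
 * Populate the standard receive producer ring with mbuf clusters and
 * hand the initial producer index to the chip.
 */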
1062static int
1063bge_init_rx_ring_std(struct bge_softc *sc)
1064{
1065 int error, i;
1066
1067 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1068 sc->bge_std = 0;
1069 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1070 if ((error = bge_newbuf_std(sc, i)) != 0)
1071 return (error);
1072 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1073 }
1074
1075 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1076 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1077
1078 sc->bge_std = 0;
1079 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1080
1081 return (0);
1082}
1083
1084static void
1085bge_free_rx_ring_std(struct bge_softc *sc)
1086{
1087 int i;
1088
1089 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1090 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1091 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1092 sc->bge_cdata.bge_rx_std_dmamap[i],
1093 BUS_DMASYNC_POSTREAD);
1094 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1095 sc->bge_cdata.bge_rx_std_dmamap[i]);
1096 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1097 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1098 }
1099 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1100 sizeof(struct bge_rx_bd));
1101 }
1102}
1103
1104static int
1105bge_init_rx_ring_jumbo(struct bge_softc *sc)
1106{
1107 struct bge_rcb *rcb;
1108 int error, i;
1109
1110 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1111 sc->bge_jumbo = 0;
1112 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1113 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1114 return (error);
1115 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1116 }
1117
1118 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1119 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1120
1121 sc->bge_jumbo = 0;
1122
1123 /* Enable the jumbo receive producer ring. */
1124 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1125 rcb->bge_maxlen_flags =
1126 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1127 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1128
1129 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1130
1131 return (0);
1132}
1133
1134static void
1135bge_free_rx_ring_jumbo(struct bge_softc *sc)
1136{
1137 int i;
1138
1139 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1140 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1141 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1142 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1143 BUS_DMASYNC_POSTREAD);
1144 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1145 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1146 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1147 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1148 }
1149 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1150 sizeof(struct bge_extrx_bd));
1151 }
1152}
1153
1154static void
1155bge_free_tx_ring(struct bge_softc *sc)
1156{
1157 int i;
1158
1159 if (sc->bge_ldata.bge_tx_ring == NULL)
1160 return;
1161
1162 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1163 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1164 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1165 sc->bge_cdata.bge_tx_dmamap[i],
1166 BUS_DMASYNC_POSTWRITE);
1167 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1168 sc->bge_cdata.bge_tx_dmamap[i]);
1169 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1170 sc->bge_cdata.bge_tx_chain[i] = NULL;
1171 }
1172 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1173 sizeof(struct bge_tx_bd));
1174 }
1175}
1176
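/*
 * Reset the transmit ring bookkeeping and the hardware producer
 * mailboxes for both the host-memory and NIC-memory send rings.
 */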
1177static int
1178bge_init_tx_ring(struct bge_softc *sc)
1179{
1180 sc->bge_txcnt = 0;
1181 sc->bge_tx_saved_considx = 0;
1182
1183 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1184 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1185 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1186
1187 /* Initialize transmit producer index for host-memory send ring. */
1188 sc->bge_tx_prodidx = 0;
1189 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1190
1191 /* 5700 b2 errata */
1192 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1193 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1194
1195 /* NIC-memory send ring not used; initialize to zero. */
1196 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1197 /* 5700 b2 errata */
1198 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1199 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1200
1201 return (0);
1202}
1203
1204static void
1205bge_setpromisc(struct bge_softc *sc)
1206{
1207 struct ifnet *ifp;
1208
1209 BGE_LOCK_ASSERT(sc);
1210
1211 ifp = sc->bge_ifp;
1212
1213 /* Enable or disable promiscuous mode as needed. */
1214 if (ifp->if_flags & IFF_PROMISC)
1215 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1216 else
1217 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1218}
1219
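/*
 * Program the multicast filter. Each multicast address is hashed
 * (low 7 bits of its CRC32) into a 128-bit filter spread across the
 * four 32-bit BGE_MAR registers; IFF_ALLMULTI and IFF_PROMISC simply
 * set every bit.
 */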
1220static void
1221bge_setmulti(struct bge_softc *sc)
1222{
1223 struct ifnet *ifp;
1224 struct ifmultiaddr *ifma;
1225 uint32_t hashes[4] = { 0, 0, 0, 0 };
1226 int h, i;
1227
1228 BGE_LOCK_ASSERT(sc);
1229
1230 ifp = sc->bge_ifp;
1231
1232 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1233 for (i = 0; i < 4; i++)
1234 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1235 return;
1236 }
1237
1238 /* First, zot all the existing filters. */
1239 for (i = 0; i < 4; i++)
1240 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1241
1242 /* Now program new ones. */
1243 if_maddr_rlock(ifp);
1244 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1245 if (ifma->ifma_addr->sa_family != AF_LINK)
1246 continue;
1247 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1248 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1249 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1250 }
1251 if_maddr_runlock(ifp);
1252
1253 for (i = 0; i < 4; i++)
1254 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1255}
1256
1257static void
1258bge_setvlan(struct bge_softc *sc)
1259{
1260 struct ifnet *ifp;
1261
1262 BGE_LOCK_ASSERT(sc);
1263
1264 ifp = sc->bge_ifp;
1265
1266 /* Enable or disable VLAN tag stripping as needed. */
1267 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1268 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1269 else
1270 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1271}
1272
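/*
 * ASF firmware handshake helpers. When ASF management firmware is
 * running, the driver signals reset and driver state transitions
 * through shared NIC memory (BGE_SOFTWARE_GENCOMM and BGE_SDI_STATUS)
 * so the firmware can track driver state.
 */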
1273static void
1274bge_sig_pre_reset(struct bge_softc *sc, int type)
1275{
1276
1277 /*
1278 * Some chips don't like this so only do this if ASF is enabled
1279 */
1280 if (sc->bge_asf_mode)
1281 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1282
1283 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1284 switch (type) {
1285 case BGE_RESET_START:
1286 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1287 break;
1288 case BGE_RESET_STOP:
1289 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1290 break;
1291 }
1292 }
1293}
1294
1295static void
1296bge_sig_post_reset(struct bge_softc *sc, int type)
1297{
1298
1299 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1300 switch (type) {
1301 case BGE_RESET_START:
1302 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1303 /* START DONE */
1304 break;
1305 case BGE_RESET_STOP:
1306 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1307 break;
1308 }
1309 }
1310}
1311
1312static void
1313bge_sig_legacy(struct bge_softc *sc, int type)
1314{
1315
1316 if (sc->bge_asf_mode) {
1317 switch (type) {
1318 case BGE_RESET_START:
1319 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1320 break;
1321 case BGE_RESET_STOP:
1322 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1323 break;
1324 }
1325 }
1326}
1327
1328static void
1329bge_stop_fw(struct bge_softc *sc)
1330{
1331 int i;
1332
1333 if (sc->bge_asf_mode) {
1334 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1335 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1336 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1337
1338 for (i = 0; i < 100; i++ ) {
1339 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1340 break;
1341 DELAY(10);
1342 }
1343 }
1344}
1345
1346/*
1347 * Do endian, PCI and DMA initialization.
1348 */
1349static int
1350bge_chipinit(struct bge_softc *sc)
1351{
1352 uint32_t dma_rw_ctl;
1353 uint16_t val;
1354 int i;
1355
1356 /* Set endianness before we access any non-PCI registers. */
1357 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1358
1359 /* Clear the MAC control register */
1360 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1361
1362 /*
1363 * Clear the MAC statistics block in the NIC's
1364 * internal memory.
1365 */
1366 for (i = BGE_STATS_BLOCK;
1367 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1368 BGE_MEMWIN_WRITE(sc, i, 0);
1369
1370 for (i = BGE_STATUS_BLOCK;
1371 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1372 BGE_MEMWIN_WRITE(sc, i, 0);
1373
1374 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1375 /*
1376 * Fix data corruption caused by non-qword write with WB.
1377 * Fix master abort in PCI mode.
1378 * Fix PCI latency timer.
1379 */
1380 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1381 val |= (1 << 10) | (1 << 12) | (1 << 13);
1382 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1383 }
1384
1385 /*
1386 * Set up the PCI DMA control register.
1387 */
1388 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1389 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1390 if (sc->bge_flags & BGE_FLAG_PCIE) {
1391 /* Read watermark not used, 128 bytes for write. */
1392 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1393 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1394 if (BGE_IS_5714_FAMILY(sc)) {
1395 /* 256 bytes for read and write. */
1396 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1397 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1398 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1399 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1400 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1401 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1402 /*
1403 * In the BCM5703, the DMA read watermark should
1404 * be set to less than or equal to the maximum
1405 * memory read byte count of the PCI-X command
1406 * register.
1407 */
1408 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1409 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1410 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1411 /* 1536 bytes for read, 384 bytes for write. */
1412 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1413 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1414 } else {
1415 /* 384 bytes for read and write. */
1416 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1417 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1418 0x0F;
1419 }
1420 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1421 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1422 uint32_t tmp;
1423
1424 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1425 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1426 if (tmp == 6 || tmp == 7)
1427 dma_rw_ctl |=
1428 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1429
1430 /* Set PCI-X DMA write workaround. */
1431 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1432 }
1433 } else {
1434 /* Conventional PCI bus: 256 bytes for read and write. */
1435 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1436 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1437
1438 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1439 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1440 dma_rw_ctl |= 0x0F;
1441 }
1442 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1443 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1444 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1445 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1446 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1447 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1448 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1449 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1450
1451 /*
1452 * Set up general mode register.
1453 */
1454 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1455 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1456 BGE_MODECTL_TX_NO_PHDR_CSUM);
1457
1458 /*
1459 * BCM5701 B5 has a bug causing data corruption when using
1460 * 64-bit DMA reads, which can be terminated early and then
1461 * completed later as 32-bit accesses, in combination with
1462 * certain bridges.
1463 */
1464 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1465 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1466 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1467
1468 /*
1469 * Tell the firmware the driver is running
1470 */
1471 if (sc->bge_asf_mode & ASF_STACKUP)
1472 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1473
1474 /*
1475 * Disable memory write invalidate. Apparently it is not supported
1476 * properly by these devices. Also ensure that INTx isn't disabled,
1477 * as these chips need it even when using MSI.
1478 */
1479 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1480 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1481
1482 /* Set the timer prescaler (always 66Mhz) */
1483 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1484
1485 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1486 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1487 DELAY(40); /* XXX */
1488
1489 /* Put PHY into ready state */
1490 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1491 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1492 DELAY(40);
1493 }
1494
1495 return (0);
1496}
1497
1498static int
1499bge_blockinit(struct bge_softc *sc)
1500{
1501 struct bge_rcb *rcb;
1502 bus_size_t vrcb;
1503 bge_hostaddr taddr;
1504 uint32_t val;
1505 int i, limit;
1506
1507 /*
1508 * Initialize the memory window pointer register so that
1509 * we can access the first 32K of internal NIC RAM. This will
1510 * allow us to set up the TX send ring RCBs and the RX return
1511 * ring RCBs, plus other things which live in NIC memory.
1512 */
1513 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1514
1515 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1516
1517 if (!(BGE_IS_5705_PLUS(sc))) {
1518 /* Configure mbuf memory pool */
1519 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1520 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1521 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1522 else
1523 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1524
1525 /* Configure DMA resource pool */
1526 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1527 BGE_DMA_DESCRIPTORS);
1528 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1529 }
1530
1531 /* Configure mbuf pool watermarks */
1532 if (!BGE_IS_5705_PLUS(sc)) {
1533 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1534 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1535 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1536 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1537 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1538 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1539 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1540 } else {
1541 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1542 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1543 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1544 }
1545
1546 /* Configure DMA resource watermarks */
1547 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1548 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1549
1550 /* Enable buffer manager */
1551 if (!(BGE_IS_5705_PLUS(sc))) {
1552 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1553 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1554
1555 /* Poll for buffer manager start indication */
1556 for (i = 0; i < BGE_TIMEOUT; i++) {
1557 DELAY(10);
1558 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1559 break;
1560 }
1561
1562 if (i == BGE_TIMEOUT) {
1563 device_printf(sc->bge_dev,
1564 "buffer manager failed to start\n");
1565 return (ENXIO);
1566 }
1567 }
1568
1569 /* Enable flow-through queues */
1570 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1571 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1572
1573 /* Wait until queue initialization is complete */
1574 for (i = 0; i < BGE_TIMEOUT; i++) {
1575 DELAY(10);
1576 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1577 break;
1578 }
1579
1580 if (i == BGE_TIMEOUT) {
1581 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1582 return (ENXIO);
1583 }
1584
1585 /*
1586 * Summary of rings supported by the controller:
1587 *
1588 * Standard Receive Producer Ring
1589 * - This ring is used to feed receive buffers for "standard"
1590 * sized frames (typically 1536 bytes) to the controller.
1591 *
1592 * Jumbo Receive Producer Ring
1593 * - This ring is used to feed receive buffers for jumbo sized
1594 * frames (i.e. anything bigger than the "standard" frames)
1595 * to the controller.
1596 *
1597 * Mini Receive Producer Ring
1598 * - This ring is used to feed receive buffers for "mini"
1599 * sized frames to the controller.
1600 * - This feature required external memory for the controller
1601 * but was never used in a production system. Should always
1602 * be disabled.
1603 *
1604 * Receive Return Ring
1605 * - After the controller has placed an incoming frame into a
1606 * receive buffer that buffer is moved into a receive return
1607 * ring. The driver is then responsible for passing the
1608 * buffer up to the stack. Many versions of the controller
1609 * support multiple RR rings.
1610 *
1611 * Send Ring
1612 * - This ring is used for outgoing frames. Many versions of
1613 * the controller support multiple send rings.
1614 */
1615
1616 /* Initialize the standard receive producer ring control block. */
1617 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1618 rcb->bge_hostaddr.bge_addr_lo =
1619 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1620 rcb->bge_hostaddr.bge_addr_hi =
1621 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1622 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1623 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1624 if (BGE_IS_5705_PLUS(sc)) {
1625 /*
1626 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1627 * Bits 15-2 : Reserved (should be 0)
1628 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1629 * Bit 0 : Reserved
1630 */
1631 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1632 } else {
1633 /*
1634 * Ring size is always XXX entries
1635 * Bits 31-16: Maximum RX frame size
1636 * Bits 15-2 : Reserved (should be 0)
1637 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1638 * Bit 0 : Reserved
1639 */
1640 rcb->bge_maxlen_flags =
1641 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1642 }
1643 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1644 /* Write the standard receive producer ring control block. */
1645 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1646 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1647 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1648 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1649
1650 /* Reset the standard receive producer ring producer index. */
1651 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1652
1653 /*
1654 * Initialize the jumbo RX producer ring control
1655 * block. We set the 'ring disabled' bit in the
1656 * flags field until we're actually ready to start
1657 * using this ring (i.e. once we set the MTU
1658 * high enough to require it).
1659 */
1660 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1661 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1662 /* Get the jumbo receive producer ring RCB parameters. */
1663 rcb->bge_hostaddr.bge_addr_lo =
1664 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1665 rcb->bge_hostaddr.bge_addr_hi =
1666 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1667 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1668 sc->bge_cdata.bge_rx_jumbo_ring_map,
1669 BUS_DMASYNC_PREREAD);
1670 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1671 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1672 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1673 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1674 rcb->bge_hostaddr.bge_addr_hi);
1675 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1676 rcb->bge_hostaddr.bge_addr_lo);
1677 /* Program the jumbo receive producer ring RCB parameters. */
1678 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1679 rcb->bge_maxlen_flags);
1680 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1681 /* Reset the jumbo receive producer ring producer index. */
1682 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1683 }
1684
1685 /* Disable the mini receive producer ring RCB. */
1686 if (BGE_IS_5700_FAMILY(sc)) {
1687 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1688 rcb->bge_maxlen_flags =
1689 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1690 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1691 rcb->bge_maxlen_flags);
1692 /* Reset the mini receive producer ring producer index. */
1693 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1694 }
1695
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
 62 * Without external SSRAM, you can have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
 136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
218
219 { SK_VENDORID, SK_DEVICEID_ALTIMA },
220
221 { TC_VENDORID, TC_DEVICEID_3C996 },
222
223 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
224 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
225 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
226
227 { 0, 0 }
228};
229
230static const struct bge_vendor {
231 uint16_t v_id;
232 const char *v_name;
233} bge_vendors[] = {
234 { ALTEON_VENDORID, "Alteon" },
235 { ALTIMA_VENDORID, "Altima" },
236 { APPLE_VENDORID, "Apple" },
237 { BCOM_VENDORID, "Broadcom" },
238 { SK_VENDORID, "SysKonnect" },
239 { TC_VENDORID, "3Com" },
240 { FJTSU_VENDORID, "Fujitsu" },
241
242 { 0, NULL }
243};
244
245static const struct bge_revision {
246 uint32_t br_chipid;
247 const char *br_name;
248} bge_revisions[] = {
249 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
250 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
251 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
252 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
253 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
254 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
255 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
256 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
257 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
258 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
259 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
260 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
261 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
262 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
263 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
264 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
265 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
266 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
267 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
268 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
269 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
270 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
271 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
272 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
273 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
274 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
275 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
276 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
277 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
278 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
279 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
280 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
281 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
282 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
283 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
284 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
285 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
286 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
287 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
288 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
289 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
290 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
291 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
292 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
293 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
294 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
295 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
296 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
297 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
298 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
299 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
300 /* 5754 and 5787 share the same ASIC ID */
301 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
302 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
303 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
304 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
305 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
306 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
307 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
308
309 { 0, NULL }
310};
311
312/*
313 * Some defaults for major revisions, so that newer steppings
314 * that we don't know about have a shot at working.
315 */
316static const struct bge_revision bge_majorrevs[] = {
317 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
318 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
319 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
320 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
321 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
322 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
323 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
324 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
325 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
326 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
327 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
328 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
329 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
330 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
331 /* 5754 and 5787 share the same ASIC ID */
332 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
333 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
334 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
335
336 { 0, NULL }
337};
338
339#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
340#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
341#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
342#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
343#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
344#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
345
346const struct bge_revision * bge_lookup_rev(uint32_t);
347const struct bge_vendor * bge_lookup_vendor(uint16_t);
348
349typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
350
351static int bge_probe(device_t);
352static int bge_attach(device_t);
353static int bge_detach(device_t);
354static int bge_suspend(device_t);
355static int bge_resume(device_t);
356static void bge_release_resources(struct bge_softc *);
357static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
358static int bge_dma_alloc(struct bge_softc *);
359static void bge_dma_free(struct bge_softc *);
360static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
361 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
362
363static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
364static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
365static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
366static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
367static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
368
369static void bge_txeof(struct bge_softc *, uint16_t);
370static int bge_rxeof(struct bge_softc *, uint16_t, int);
371
372static void bge_asf_driver_up (struct bge_softc *);
373static void bge_tick(void *);
374static void bge_stats_clear_regs(struct bge_softc *);
375static void bge_stats_update(struct bge_softc *);
376static void bge_stats_update_regs(struct bge_softc *);
377static struct mbuf *bge_check_short_dma(struct mbuf *);
378static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
379 uint16_t *);
380static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
381
382static void bge_intr(void *);
383static int bge_msi_intr(void *);
384static void bge_intr_task(void *, int);
385static void bge_start_locked(struct ifnet *);
386static void bge_start(struct ifnet *);
387static int bge_ioctl(struct ifnet *, u_long, caddr_t);
388static void bge_init_locked(struct bge_softc *);
389static void bge_init(void *);
390static void bge_stop(struct bge_softc *);
391static void bge_watchdog(struct bge_softc *);
392static int bge_shutdown(device_t);
393static int bge_ifmedia_upd_locked(struct ifnet *);
394static int bge_ifmedia_upd(struct ifnet *);
395static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
396
397static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
398static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
399
400static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
401static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
402
403static void bge_setpromisc(struct bge_softc *);
404static void bge_setmulti(struct bge_softc *);
405static void bge_setvlan(struct bge_softc *);
406
407static __inline void bge_rxreuse_std(struct bge_softc *, int);
408static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
409static int bge_newbuf_std(struct bge_softc *, int);
410static int bge_newbuf_jumbo(struct bge_softc *, int);
411static int bge_init_rx_ring_std(struct bge_softc *);
412static void bge_free_rx_ring_std(struct bge_softc *);
413static int bge_init_rx_ring_jumbo(struct bge_softc *);
414static void bge_free_rx_ring_jumbo(struct bge_softc *);
415static void bge_free_tx_ring(struct bge_softc *);
416static int bge_init_tx_ring(struct bge_softc *);
417
418static int bge_chipinit(struct bge_softc *);
419static int bge_blockinit(struct bge_softc *);
420
421static int bge_has_eaddr(struct bge_softc *);
422static uint32_t bge_readmem_ind(struct bge_softc *, int);
423static void bge_writemem_ind(struct bge_softc *, int, int);
424static void bge_writembx(struct bge_softc *, int, int);
425#ifdef notdef
426static uint32_t bge_readreg_ind(struct bge_softc *, int);
427#endif
428static void bge_writemem_direct(struct bge_softc *, int, int);
429static void bge_writereg_ind(struct bge_softc *, int, int);
430
431static int bge_miibus_readreg(device_t, int, int);
432static int bge_miibus_writereg(device_t, int, int, int);
433static void bge_miibus_statchg(device_t);
434#ifdef DEVICE_POLLING
435static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
436#endif
437
438#define BGE_RESET_START 1
439#define BGE_RESET_STOP 2
440static void bge_sig_post_reset(struct bge_softc *, int);
441static void bge_sig_legacy(struct bge_softc *, int);
442static void bge_sig_pre_reset(struct bge_softc *, int);
443static void bge_stop_fw(struct bge_softc *);
444static int bge_reset(struct bge_softc *);
445static void bge_link_upd(struct bge_softc *);
446
447/*
448 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
449 * leak information to untrusted users. It is also known to cause alignment
450 * traps on certain architectures.
451 */
452#ifdef BGE_REGISTER_DEBUG
453static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
454static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
455static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
456#endif
457static void bge_add_sysctls(struct bge_softc *);
458static void bge_add_sysctl_stats_regs(struct bge_softc *,
459 struct sysctl_ctx_list *, struct sysctl_oid_list *);
460static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
461 struct sysctl_oid_list *);
462static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
463
464static device_method_t bge_methods[] = {
465 /* Device interface */
466 DEVMETHOD(device_probe, bge_probe),
467 DEVMETHOD(device_attach, bge_attach),
468 DEVMETHOD(device_detach, bge_detach),
469 DEVMETHOD(device_shutdown, bge_shutdown),
470 DEVMETHOD(device_suspend, bge_suspend),
471 DEVMETHOD(device_resume, bge_resume),
472
473 /* bus interface */
474 DEVMETHOD(bus_print_child, bus_generic_print_child),
475 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
476
477 /* MII interface */
478 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
479 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
480 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
481
482 { 0, 0 }
483};
484
485static driver_t bge_driver = {
486 "bge",
487 bge_methods,
488 sizeof(struct bge_softc)
489};
490
491static devclass_t bge_devclass;
492
493DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
494DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
495
496static int bge_allow_asf = 1;
497
498TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
499
500SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
501SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
502 "Allow ASF mode if available");
503
504#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
505#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
506#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
507#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
508#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
509
510static int
511bge_has_eaddr(struct bge_softc *sc)
512{
513#ifdef __sparc64__
514 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
515 device_t dev;
516 uint32_t subvendor;
517
518 dev = sc->bge_dev;
519
520 /*
521 * The on-board BGEs found in sun4u machines aren't fitted with
522 * an EEPROM which means that we have to obtain the MAC address
523 * via OFW and that some tests will always fail. We distinguish
524 * such BGEs by the subvendor ID, which also has to be obtained
525 * from OFW instead of the PCI configuration space as the latter
526 * indicates Broadcom as the subvendor of the netboot interface.
527 * For early Blade 1500 and 2500 we even have to check the OFW
528 * device path as the subvendor ID always defaults to Broadcom
529 * there.
530 */
531 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
532 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
533 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
534 return (0);
535 memset(buf, 0, sizeof(buf));
536 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
537 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
538 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
539 return (0);
540 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
541 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
542 return (0);
543 }
544#endif
545 return (1);
546}
547
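/*
 * Indirect access to NIC internal memory: program the memory window
 * base address register in PCI configuration space, transfer the word
 * through the memory window data register, then restore the base.
 */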
548static uint32_t
549bge_readmem_ind(struct bge_softc *sc, int off)
550{
551 device_t dev;
552 uint32_t val;
553
554 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
555 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
556 return (0);
557
558 dev = sc->bge_dev;
559
560 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
561 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
562 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
563 return (val);
564}
565
566static void
567bge_writemem_ind(struct bge_softc *sc, int off, int val)
568{
569 device_t dev;
570
571 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
572 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
573 return;
574
575 dev = sc->bge_dev;
576
577 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
578 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
579 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
580}
581
582#ifdef notdef
583static uint32_t
584bge_readreg_ind(struct bge_softc *sc, int off)
585{
586 device_t dev;
587
588 dev = sc->bge_dev;
589
590 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
591 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
592}
593#endif
594
595static void
596bge_writereg_ind(struct bge_softc *sc, int off, int val)
597{
598 device_t dev;
599
600 dev = sc->bge_dev;
601
602 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
603 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
604}
605
606static void
607bge_writemem_direct(struct bge_softc *sc, int off, int val)
608{
609 CSR_WRITE_4(sc, off, val);
610}
611
612static void
613bge_writembx(struct bge_softc *sc, int off, int val)
614{
615 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
616 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
617
618 CSR_WRITE_4(sc, off, val);
619}
620
621/*
622 * Map a single buffer address.
623 */
624
625static void
626bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
627{
628 struct bge_dmamap_arg *ctx;
629
630 if (error)
631 return;
632
633 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
634
635 ctx = arg;
636 ctx->bge_busaddr = segs->ds_addr;
637}
638
639static uint8_t
640bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
641{
642 uint32_t access, byte = 0;
643 int i;
644
645 /* Lock. */
646 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
647 for (i = 0; i < 8000; i++) {
648 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
649 break;
650 DELAY(20);
651 }
652 if (i == 8000)
653 return (1);
654
655 /* Enable access. */
656 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
657 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
658
659 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
660 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
661 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
662 DELAY(10);
663 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
664 DELAY(10);
665 break;
666 }
667 }
668
669 if (i == BGE_TIMEOUT * 10) {
670 if_printf(sc->bge_ifp, "nvram read timed out\n");
671 return (1);
672 }
673
674 /* Get result. */
675 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
676
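	/*
	 * The read above returns the whole 32-bit word at the aligned
	 * address; byte-swap it and extract the byte at offset (addr % 4).
	 */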
677 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
678
679 /* Disable access. */
680 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
681
682 /* Unlock. */
683 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
684 CSR_READ_4(sc, BGE_NVRAM_SWARB);
685
686 return (0);
687}
688
689/*
690 * Read a sequence of bytes from NVRAM.
691 */
692static int
693bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
694{
695 int err = 0, i;
696 uint8_t byte = 0;
697
698 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
699 return (1);
700
701 for (i = 0; i < cnt; i++) {
702 err = bge_nvram_getbyte(sc, off + i, &byte);
703 if (err)
704 break;
705 *(dest + i) = byte;
706 }
707
708 return (err ? 1 : 0);
709}
710
711/*
712 * Read a byte of data stored in the EEPROM at address 'addr.' The
713 * BCM570x supports both the traditional bitbang interface and an
714 * auto access interface for reading the EEPROM. We use the auto
715 * access method.
716 */
717static uint8_t
718bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
719{
720 int i;
721 uint32_t byte = 0;
722
723 /*
724 * Enable use of auto EEPROM access so we can avoid
725 * having to use the bitbang method.
726 */
727 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
728
729 /* Reset the EEPROM, load the clock period. */
730 CSR_WRITE_4(sc, BGE_EE_ADDR,
731 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
732 DELAY(20);
733
734 /* Issue the read EEPROM command. */
735 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
736
737 /* Wait for completion */
738 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
739 DELAY(10);
740 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
741 break;
742 }
743
744 if (i == BGE_TIMEOUT * 10) {
745 device_printf(sc->bge_dev, "EEPROM read timed out\n");
746 return (1);
747 }
748
749 /* Get result. */
750 byte = CSR_READ_4(sc, BGE_EE_DATA);
751
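	/* Extract the requested byte from the 32-bit word read above. */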
752 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
753
754 return (0);
755}
756
757/*
758 * Read a sequence of bytes from the EEPROM.
759 */
760static int
761bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
762{
763 int i, error = 0;
764 uint8_t byte = 0;
765
766 for (i = 0; i < cnt; i++) {
767 error = bge_eeprom_getbyte(sc, off + i, &byte);
768 if (error)
769 break;
770 *(dest + i) = byte;
771 }
772
773 return (error ? 1 : 0);
774}
775
776static int
777bge_miibus_readreg(device_t dev, int phy, int reg)
778{
779 struct bge_softc *sc;
780 uint32_t val;
781 int i;
782
783 sc = device_get_softc(dev);
784
785 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
786 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
787 CSR_WRITE_4(sc, BGE_MI_MODE,
788 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
789 DELAY(80);
790 }
791
792 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
793 BGE_MIPHY(phy) | BGE_MIREG(reg));
794
795 /* Poll for the PHY register access to complete. */
796 for (i = 0; i < BGE_TIMEOUT; i++) {
797 DELAY(10);
798 val = CSR_READ_4(sc, BGE_MI_COMM);
799 if ((val & BGE_MICOMM_BUSY) == 0) {
800 DELAY(5);
801 val = CSR_READ_4(sc, BGE_MI_COMM);
802 break;
803 }
804 }
805
806 if (i == BGE_TIMEOUT) {
807 device_printf(sc->bge_dev,
808 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
809 phy, reg, val);
810 val = 0;
811 }
812
813 /* Restore the autopoll bit if necessary. */
814 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
815 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
816 DELAY(80);
817 }
818
819 if (val & BGE_MICOMM_READFAIL)
820 return (0);
821
822 return (val & 0xFFFF);
823}
824
825static int
826bge_miibus_writereg(device_t dev, int phy, int reg, int val)
827{
828 struct bge_softc *sc;
829 int i;
830
831 sc = device_get_softc(dev);
832
833 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
834 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
835 return (0);
836
837 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
838 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
839 CSR_WRITE_4(sc, BGE_MI_MODE,
840 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
841 DELAY(80);
842 }
843
844 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
845 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
846
847 for (i = 0; i < BGE_TIMEOUT; i++) {
848 DELAY(10);
849 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
850 DELAY(5);
851 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
852 break;
853 }
854 }
855
856 /* Restore the autopoll bit if necessary. */
857 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
858 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
859 DELAY(80);
860 }
861
862 if (i == BGE_TIMEOUT)
863 device_printf(sc->bge_dev,
864 "PHY write timed out (phy %d, reg %d, val %d)\n",
865 phy, reg, val);
866
867 return (0);
868}
869
870static void
871bge_miibus_statchg(device_t dev)
872{
873 struct bge_softc *sc;
874 struct mii_data *mii;
875 sc = device_get_softc(dev);
876 mii = device_get_softc(sc->bge_miibus);
877
878 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
879 (IFM_ACTIVE | IFM_AVALID)) {
880 switch (IFM_SUBTYPE(mii->mii_media_active)) {
881 case IFM_10_T:
882 case IFM_100_TX:
883 sc->bge_link = 1;
884 break;
885 case IFM_1000_T:
886 case IFM_1000_SX:
887 case IFM_2500_SX:
888 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
889 sc->bge_link = 1;
890 else
891 sc->bge_link = 0;
892 break;
893 default:
894 sc->bge_link = 0;
895 break;
896 }
897 } else
898 sc->bge_link = 0;
899 if (sc->bge_link == 0)
900 return;
901 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
902 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
903 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
904 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
905 else
906 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
907
 908	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
909 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
910 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FLAG1)
911 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
912 else
913 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
914 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FLAG0)
915 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
916 else
917 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
918 } else {
919 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
920 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
921 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
922 }
923}
924
925/*
 926 * Initialize a standard receive ring descriptor.
927 */
928static int
929bge_newbuf_std(struct bge_softc *sc, int i)
930{
931 struct mbuf *m;
932 struct bge_rx_bd *r;
933 bus_dma_segment_t segs[1];
934 bus_dmamap_t map;
935 int error, nsegs;
936
937 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
938 if (m == NULL)
939 return (ENOBUFS);
940 m->m_len = m->m_pkthdr.len = MCLBYTES;
941 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
942 m_adj(m, ETHER_ALIGN);
943
944 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
945 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
946 if (error != 0) {
947 m_freem(m);
948 return (error);
949 }
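	/*
	 * The new mbuf was loaded into the spare DMA map above. Unload
	 * the old buffer (if any), then swap the spare map into this
	 * ring slot so a valid spare remains for the next call.
	 */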
950 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
951 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
952 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
953 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
954 sc->bge_cdata.bge_rx_std_dmamap[i]);
955 }
956 map = sc->bge_cdata.bge_rx_std_dmamap[i];
957 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
958 sc->bge_cdata.bge_rx_std_sparemap = map;
959 sc->bge_cdata.bge_rx_std_chain[i] = m;
960 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
961 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
962 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
963 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
964 r->bge_flags = BGE_RXBDFLAG_END;
965 r->bge_len = segs[0].ds_len;
966 r->bge_idx = i;
967
968 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
969 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
970
971 return (0);
972}
973
974/*
 975 * Initialize a jumbo receive ring descriptor. This allocates
 976 * a 9KB mbuf cluster to serve as the jumbo receive buffer.
977 */
978static int
979bge_newbuf_jumbo(struct bge_softc *sc, int i)
980{
981 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
982 bus_dmamap_t map;
983 struct bge_extrx_bd *r;
984 struct mbuf *m;
985 int error, nsegs;
986
987 MGETHDR(m, M_DONTWAIT, MT_DATA);
988 if (m == NULL)
989 return (ENOBUFS);
990
991 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
992 if (!(m->m_flags & M_EXT)) {
993 m_freem(m);
994 return (ENOBUFS);
995 }
996 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
997 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
998 m_adj(m, ETHER_ALIGN);
999
1000 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1001 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1002 if (error != 0) {
1003 m_freem(m);
1004 return (error);
1005 }
1006
 1007	if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1008 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1009 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1010 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1011 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1012 }
1013 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1014 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1015 sc->bge_cdata.bge_rx_jumbo_sparemap;
1016 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1017 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1018 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1019 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1020 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1021 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1022
1023 /*
1024 * Fill in the extended RX buffer descriptor.
1025 */
1026 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1027 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1028 r->bge_idx = i;
1029 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
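	/*
	 * The cases below intentionally fall through, filling one
	 * address/length pair per DMA segment from the highest
	 * numbered segment down to segment 0.
	 */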
1030 switch (nsegs) {
1031 case 4:
1032 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1033 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1034 r->bge_len3 = segs[3].ds_len;
1035 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1036 case 3:
1037 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1038 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1039 r->bge_len2 = segs[2].ds_len;
1040 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1041 case 2:
1042 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1043 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1044 r->bge_len1 = segs[1].ds_len;
1045 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1046 case 1:
1047 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1048 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1049 r->bge_len0 = segs[0].ds_len;
1050 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1051 break;
1052 default:
1053 panic("%s: %d segments\n", __func__, nsegs);
1054 }
1055
1056 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1057 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1058
1059 return (0);
1060}
1061
1062static int
1063bge_init_rx_ring_std(struct bge_softc *sc)
1064{
1065 int error, i;
1066
1067 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1068 sc->bge_std = 0;
1069 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1070 if ((error = bge_newbuf_std(sc, i)) != 0)
1071 return (error);
1072 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1073 }
1074
1075 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1076 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1077
1078 sc->bge_std = 0;
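	/*
	 * Advance the standard ring producer index mailbox to the last
	 * slot so the chip sees all of the buffers initialized above.
	 */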
1079 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1080
1081 return (0);
1082}
1083
1084static void
1085bge_free_rx_ring_std(struct bge_softc *sc)
1086{
1087 int i;
1088
1089 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1090 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1091 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1092 sc->bge_cdata.bge_rx_std_dmamap[i],
1093 BUS_DMASYNC_POSTREAD);
1094 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1095 sc->bge_cdata.bge_rx_std_dmamap[i]);
1096 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1097 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1098 }
1099 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1100 sizeof(struct bge_rx_bd));
1101 }
1102}
1103
1104static int
1105bge_init_rx_ring_jumbo(struct bge_softc *sc)
1106{
1107 struct bge_rcb *rcb;
1108 int error, i;
1109
1110 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1111 sc->bge_jumbo = 0;
1112 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1113 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1114 return (error);
1115 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1116 }
1117
1118 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1119 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1120
1121 sc->bge_jumbo = 0;
1122
1123 /* Enable the jumbo receive producer ring. */
1124 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1125 rcb->bge_maxlen_flags =
1126 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1127 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1128
1129 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1130
1131 return (0);
1132}
1133
1134static void
1135bge_free_rx_ring_jumbo(struct bge_softc *sc)
1136{
1137 int i;
1138
1139 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1140 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1141 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1142 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1143 BUS_DMASYNC_POSTREAD);
1144 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1145 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1146 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1147 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1148 }
1149 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1150 sizeof(struct bge_extrx_bd));
1151 }
1152}
1153
1154static void
1155bge_free_tx_ring(struct bge_softc *sc)
1156{
1157 int i;
1158
1159 if (sc->bge_ldata.bge_tx_ring == NULL)
1160 return;
1161
1162 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1163 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1164 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1165 sc->bge_cdata.bge_tx_dmamap[i],
1166 BUS_DMASYNC_POSTWRITE);
1167 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1168 sc->bge_cdata.bge_tx_dmamap[i]);
1169 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1170 sc->bge_cdata.bge_tx_chain[i] = NULL;
1171 }
1172 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1173 sizeof(struct bge_tx_bd));
1174 }
1175}
1176
1177static int
1178bge_init_tx_ring(struct bge_softc *sc)
1179{
1180 sc->bge_txcnt = 0;
1181 sc->bge_tx_saved_considx = 0;
1182
1183 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1184 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1185 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1186
1187 /* Initialize transmit producer index for host-memory send ring. */
1188 sc->bge_tx_prodidx = 0;
1189 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1190
1191 /* 5700 b2 errata */
1192 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1193 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1194
1195 /* NIC-memory send ring not used; initialize to zero. */
1196 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1197 /* 5700 b2 errata */
1198 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1199 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1200
1201 return (0);
1202}
1203
1204static void
1205bge_setpromisc(struct bge_softc *sc)
1206{
1207 struct ifnet *ifp;
1208
1209 BGE_LOCK_ASSERT(sc);
1210
1211 ifp = sc->bge_ifp;
1212
1213 /* Enable or disable promiscuous mode as needed. */
1214 if (ifp->if_flags & IFF_PROMISC)
1215 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1216 else
1217 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1218}
1219
1220static void
1221bge_setmulti(struct bge_softc *sc)
1222{
1223 struct ifnet *ifp;
1224 struct ifmultiaddr *ifma;
1225 uint32_t hashes[4] = { 0, 0, 0, 0 };
1226 int h, i;
1227
1228 BGE_LOCK_ASSERT(sc);
1229
1230 ifp = sc->bge_ifp;
1231
1232 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1233 for (i = 0; i < 4; i++)
1234 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1235 return;
1236 }
1237
1238 /* First, zot all the existing filters. */
1239 for (i = 0; i < 4; i++)
1240 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1241
1242 /* Now program new ones. */
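	/*
	 * Each address is hashed with CRC32; the low 7 bits of the CRC
	 * select one of the 128 filter bits spread across the four
	 * 32-bit multicast hash registers starting at BGE_MAR0.
	 */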
1243 if_maddr_rlock(ifp);
1244 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1245 if (ifma->ifma_addr->sa_family != AF_LINK)
1246 continue;
1247 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1248 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1249 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1250 }
1251 if_maddr_runlock(ifp);
1252
1253 for (i = 0; i < 4; i++)
1254 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1255}
1256
1257static void
1258bge_setvlan(struct bge_softc *sc)
1259{
1260 struct ifnet *ifp;
1261
1262 BGE_LOCK_ASSERT(sc);
1263
1264 ifp = sc->bge_ifp;
1265
1266 /* Enable or disable VLAN tag stripping as needed. */
1267 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1268 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1269 else
1270 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1271}
1272
1273static void
1274bge_sig_pre_reset(struct bge_softc *sc, int type)
1275{
1276
1277 /*
 1278	 * Some chips don't like this, so only do it if ASF is enabled.
1279 */
1280 if (sc->bge_asf_mode)
1281 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1282
1283 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1284 switch (type) {
1285 case BGE_RESET_START:
1286 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1287 break;
1288 case BGE_RESET_STOP:
1289 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1290 break;
1291 }
1292 }
1293}
1294
1295static void
1296bge_sig_post_reset(struct bge_softc *sc, int type)
1297{
1298
1299 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1300 switch (type) {
1301 case BGE_RESET_START:
1302 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1303 /* START DONE */
1304 break;
1305 case BGE_RESET_STOP:
1306 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1307 break;
1308 }
1309 }
1310}
1311
1312static void
1313bge_sig_legacy(struct bge_softc *sc, int type)
1314{
1315
1316 if (sc->bge_asf_mode) {
1317 switch (type) {
1318 case BGE_RESET_START:
1319 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1320 break;
1321 case BGE_RESET_STOP:
1322 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1323 break;
1324 }
1325 }
1326}
1327
1328static void
1329bge_stop_fw(struct bge_softc *sc)
1330{
1331 int i;
1332
1333 if (sc->bge_asf_mode) {
1334 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1335 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1336 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1337
1338 for (i = 0; i < 100; i++ ) {
1339 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1340 break;
1341 DELAY(10);
1342 }
1343 }
1344}
1345
1346/*
1347 * Do endian, PCI and DMA initialization.
1348 */
1349static int
1350bge_chipinit(struct bge_softc *sc)
1351{
1352 uint32_t dma_rw_ctl;
1353 uint16_t val;
1354 int i;
1355
1356 /* Set endianness before we access any non-PCI registers. */
1357 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1358
1359 /* Clear the MAC control register */
1360 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1361
1362 /*
1363 * Clear the MAC statistics block in the NIC's
1364 * internal memory.
1365 */
1366 for (i = BGE_STATS_BLOCK;
1367 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1368 BGE_MEMWIN_WRITE(sc, i, 0);
1369
1370 for (i = BGE_STATUS_BLOCK;
1371 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1372 BGE_MEMWIN_WRITE(sc, i, 0);
1373
1374 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1375 /*
1376 * Fix data corruption caused by non-qword write with WB.
1377 * Fix master abort in PCI mode.
1378 * Fix PCI latency timer.
1379 */
1380 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1381 val |= (1 << 10) | (1 << 12) | (1 << 13);
1382 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1383 }
1384
1385 /*
1386 * Set up the PCI DMA control register.
1387 */
1388 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1389 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1390 if (sc->bge_flags & BGE_FLAG_PCIE) {
1391 /* Read watermark not used, 128 bytes for write. */
1392 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1393 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1394 if (BGE_IS_5714_FAMILY(sc)) {
1395 /* 256 bytes for read and write. */
1396 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1397 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1398 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1399 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1400 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1401 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1402 /*
1403 * In the BCM5703, the DMA read watermark should
1404 * be set to less than or equal to the maximum
1405 * memory read byte count of the PCI-X command
1406 * register.
1407 */
1408 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1409 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1410 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1411 /* 1536 bytes for read, 384 bytes for write. */
1412 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1413 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1414 } else {
1415 /* 384 bytes for read and write. */
1416 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1417 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1418 0x0F;
1419 }
1420 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1421 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1422 uint32_t tmp;
1423
1424 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1425 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1426 if (tmp == 6 || tmp == 7)
1427 dma_rw_ctl |=
1428 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1429
1430 /* Set PCI-X DMA write workaround. */
1431 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1432 }
1433 } else {
1434 /* Conventional PCI bus: 256 bytes for read and write. */
1435 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1436 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1437
1438 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1439 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1440 dma_rw_ctl |= 0x0F;
1441 }
1442 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1443 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1444 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1445 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1446 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1447 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1448 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1449 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1450
1451 /*
1452 * Set up general mode register.
1453 */
1454 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1455 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1456 BGE_MODECTL_TX_NO_PHDR_CSUM);
1457
1458 /*
1459 * BCM5701 B5 have a bug causing data corruption when using
1460 * 64-bit DMA reads, which can be terminated early and then
1461 * completed later as 32-bit accesses, in combination with
1462 * certain bridges.
1463 */
1464 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1465 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1466 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1467
1468 /*
1469 * Tell the firmware the driver is running
1470 */
1471 if (sc->bge_asf_mode & ASF_STACKUP)
1472 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1473
1474 /*
1475 * Disable memory write invalidate. Apparently it is not supported
1476 * properly by these devices. Also ensure that INTx isn't disabled,
1477 * as these chips need it even when using MSI.
1478 */
1479 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1480 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1481
 1482	/* Set the timer prescaler (always 66MHz) */
1483 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1484
1485 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1486 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1487 DELAY(40); /* XXX */
1488
1489 /* Put PHY into ready state */
1490 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1491 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1492 DELAY(40);
1493 }
1494
1495 return (0);
1496}
1497
1498static int
1499bge_blockinit(struct bge_softc *sc)
1500{
1501 struct bge_rcb *rcb;
1502 bus_size_t vrcb;
1503 bge_hostaddr taddr;
1504 uint32_t val;
1505 int i, limit;
1506
1507 /*
1508 * Initialize the memory window pointer register so that
1509 * we can access the first 32K of internal NIC RAM. This will
1510 * allow us to set up the TX send ring RCBs and the RX return
1511 * ring RCBs, plus other things which live in NIC memory.
1512 */
1513 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1514
1515 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1516
1517 if (!(BGE_IS_5705_PLUS(sc))) {
1518 /* Configure mbuf memory pool */
1519 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1520 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1521 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1522 else
1523 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1524
1525 /* Configure DMA resource pool */
1526 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1527 BGE_DMA_DESCRIPTORS);
1528 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1529 }
1530
1531 /* Configure mbuf pool watermarks */
1532 if (!BGE_IS_5705_PLUS(sc)) {
1533 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1534 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1535 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1536 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1537 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1538 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1539 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1540 } else {
1541 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1542 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1543 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1544 }
1545
1546 /* Configure DMA resource watermarks */
1547 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1548 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1549
1550 /* Enable buffer manager */
1551 if (!(BGE_IS_5705_PLUS(sc))) {
1552 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1553 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1554
1555 /* Poll for buffer manager start indication */
1556 for (i = 0; i < BGE_TIMEOUT; i++) {
1557 DELAY(10);
1558 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1559 break;
1560 }
1561
1562 if (i == BGE_TIMEOUT) {
1563 device_printf(sc->bge_dev,
1564 "buffer manager failed to start\n");
1565 return (ENXIO);
1566 }
1567 }
1568
1569 /* Enable flow-through queues */
1570 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1571 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1572
1573 /* Wait until queue initialization is complete */
1574 for (i = 0; i < BGE_TIMEOUT; i++) {
1575 DELAY(10);
1576 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1577 break;
1578 }
1579
1580 if (i == BGE_TIMEOUT) {
1581 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1582 return (ENXIO);
1583 }
1584
1585 /*
1586 * Summary of rings supported by the controller:
1587 *
1588 * Standard Receive Producer Ring
1589 * - This ring is used to feed receive buffers for "standard"
1590 * sized frames (typically 1536 bytes) to the controller.
1591 *
1592 * Jumbo Receive Producer Ring
1593 * - This ring is used to feed receive buffers for jumbo sized
1594 * frames (i.e. anything bigger than the "standard" frames)
1595 * to the controller.
1596 *
1597 * Mini Receive Producer Ring
1598 * - This ring is used to feed receive buffers for "mini"
1599 * sized frames to the controller.
1600 * - This feature required external memory for the controller
1601 * but was never used in a production system. Should always
1602 * be disabled.
1603 *
1604 * Receive Return Ring
1605 * - After the controller has placed an incoming frame into a
1606 * receive buffer that buffer is moved into a receive return
 1607	 * ring. The driver is then responsible for passing the
1608 * buffer up to the stack. Many versions of the controller
1609 * support multiple RR rings.
1610 *
1611 * Send Ring
1612 * - This ring is used for outgoing frames. Many versions of
1613 * the controller support multiple send rings.
1614 */
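	/*
	 * This driver uses only the first send ring and the first
	 * receive return ring; any additional rings the controller
	 * supports are explicitly disabled further below.
	 */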
1615
1616 /* Initialize the standard receive producer ring control block. */
1617 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1618 rcb->bge_hostaddr.bge_addr_lo =
1619 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1620 rcb->bge_hostaddr.bge_addr_hi =
1621 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1622 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1623 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1624 if (BGE_IS_5705_PLUS(sc)) {
1625 /*
1626 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1627 * Bits 15-2 : Reserved (should be 0)
1628 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1629 * Bit 0 : Reserved
1630 */
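		/*
		 * Per the layout above, BGE_RCB_MAXLEN_FLAGS() packs the
		 * ring size into the upper 16 bits and the flag bits into
		 * the lower 16, so a 512-entry, enabled ring is
		 * (512 << 16) | 0.
		 */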
1631 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1632 } else {
1633 /*
1634 * Ring size is always XXX entries
1635 * Bits 31-16: Maximum RX frame size
1636 * Bits 15-2 : Reserved (should be 0)
1637 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1638 * Bit 0 : Reserved
1639 */
1640 rcb->bge_maxlen_flags =
1641 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1642 }
1643 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1644 /* Write the standard receive producer ring control block. */
1645 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1646 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1647 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1648 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1649
1650 /* Reset the standard receive producer ring producer index. */
1651 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1652
1653 /*
1654 * Initialize the jumbo RX producer ring control
1655 * block. We set the 'ring disabled' bit in the
1656 * flags field until we're actually ready to start
1657 * using this ring (i.e. once we set the MTU
1658 * high enough to require it).
1659 */
1660 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1661 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1662 /* Get the jumbo receive producer ring RCB parameters. */
1663 rcb->bge_hostaddr.bge_addr_lo =
1664 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1665 rcb->bge_hostaddr.bge_addr_hi =
1666 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1667 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1668 sc->bge_cdata.bge_rx_jumbo_ring_map,
1669 BUS_DMASYNC_PREREAD);
1670 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1671 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1672 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1673 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1674 rcb->bge_hostaddr.bge_addr_hi);
1675 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1676 rcb->bge_hostaddr.bge_addr_lo);
1677 /* Program the jumbo receive producer ring RCB parameters. */
1678 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1679 rcb->bge_maxlen_flags);
1680 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1681 /* Reset the jumbo receive producer ring producer index. */
1682 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1683 }
1684
1685 /* Disable the mini receive producer ring RCB. */
1686 if (BGE_IS_5700_FAMILY(sc)) {
1687 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1688 rcb->bge_maxlen_flags =
1689 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1690 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1691 rcb->bge_maxlen_flags);
1692 /* Reset the mini receive producer ring producer index. */
1693 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1694 }
1695
1696 /* Choose de-pipeline mode for BCM5906 A1. */
1697 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
 1698	    sc->bge_chipid == BGE_CHIPID_BCM5906_A1)
1699 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1700 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1696 /*
1697 * The BD ring replenish thresholds control how often the
1698 * hardware fetches new BD's from the producer rings in host
1699 * memory. Setting the value too low on a busy system can
1700	 * starve the hardware and reduce the throughput.
1701	 *
1702	 * Set the BD ring replenish thresholds. The recommended
1703 * values are 1/8th the number of descriptors allocated to
1704 * each ring.
1705 * XXX The 5754 requires a lower threshold, so it might be a
1706 * requirement of all 575x family chips. The Linux driver sets
1707 * the lower threshold for all 5705 family chips as well, but there
1708 * are reports that it might not need to be so strict.
1709 *
1710 * XXX Linux does some extra fiddling here for the 5906 parts as
1711 * well.
1712 */
1713 if (BGE_IS_5705_PLUS(sc))
1714 val = 8;
1715 else
1716 val = BGE_STD_RX_RING_CNT / 8;
1717 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1718 if (BGE_IS_JUMBO_CAPABLE(sc))
1719 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1720 BGE_JUMBO_RX_RING_CNT/8);
1721
1722 /*
1723 * Disable all send rings by setting the 'ring disabled' bit
1724 * in the flags field of all the TX send ring control blocks,
1725 * located in NIC memory.
1726 */
1727 if (!BGE_IS_5705_PLUS(sc))
1728 /* 5700 to 5704 had 16 send rings. */
1729 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1730 else
1731 limit = 1;
1732 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1733 for (i = 0; i < limit; i++) {
1734 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1735 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1736 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1737 vrcb += sizeof(struct bge_rcb);
1738 }
1739
1740 /* Configure send ring RCB 0 (we use only the first ring) */
1741 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1742 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1743 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1744 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1745 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1746 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1747 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1748 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1749
1750 /*
1751 * Disable all receive return rings by setting the
1752	 * 'ring disabled' bit in the flags field of all the receive
1753 * return ring control blocks, located in NIC memory.
1754 */
1755 if (!BGE_IS_5705_PLUS(sc))
1756 limit = BGE_RX_RINGS_MAX;
1757 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
1758 limit = 4;
1759 else
1760 limit = 1;
1761 /* Disable all receive return rings. */
1762 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1763 for (i = 0; i < limit; i++) {
1764 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1765 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1766 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1767 BGE_RCB_FLAG_RING_DISABLED);
1768 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1769 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1770 (i * (sizeof(uint64_t))), 0);
1771 vrcb += sizeof(struct bge_rcb);
1772 }
1773
1774 /*
1775 * Set up receive return ring 0. Note that the NIC address
1776 * for RX return rings is 0x0. The return rings live entirely
1777 * within the host, so the nicaddr field in the RCB isn't used.
1778 */
1779 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1780 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1781 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1782 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1783 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1784 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1785 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1786
1787 /* Set random backoff seed for TX */
1788 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1789 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1790 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1791 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1792 BGE_TX_BACKOFF_SEED_MASK);
1793
1794 /* Set inter-packet gap */
1795 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1796
1797 /*
1798 * Specify which ring to use for packets that don't match
1799 * any RX rules.
1800 */
1801 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1802
1803 /*
1804 * Configure number of RX lists. One interrupt distribution
1805 * list, sixteen active lists, one bad frames class.
1806 */
1807 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1808
1809	/* Initialize RX list placement stats mask. */
1810 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1811 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1812
1813 /* Disable host coalescing until we get it set up */
1814 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1815
1816 /* Poll to make sure it's shut down. */
1817 for (i = 0; i < BGE_TIMEOUT; i++) {
1818 DELAY(10);
1819 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1820 break;
1821 }
1822
1823 if (i == BGE_TIMEOUT) {
1824 device_printf(sc->bge_dev,
1825 "host coalescing engine failed to idle\n");
1826 return (ENXIO);
1827 }
1828
1829 /* Set up host coalescing defaults */
1830 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1831 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1832 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1833 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1834 if (!(BGE_IS_5705_PLUS(sc))) {
1835 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1836 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1837 }
1838 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1839 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1840
1841 /* Set up address of statistics block */
1842 if (!(BGE_IS_5705_PLUS(sc))) {
1843 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1844 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1845 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1846 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1847 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1848 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1849 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1850 }
1851
1852 /* Set up address of status block */
1853 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1854 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1855 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1856 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1857
1858 /* Set up status block size. */
1859 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1860 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1861 val = BGE_STATBLKSZ_FULL;
1862 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1863 } else {
1864 val = BGE_STATBLKSZ_32BYTE;
1865 bzero(sc->bge_ldata.bge_status_block, 32);
1866 }
1867 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1868 sc->bge_cdata.bge_status_map,
1869 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1870
1871 /* Turn on host coalescing state machine */
1872 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1873
1874 /* Turn on RX BD completion state machine and enable attentions */
1875 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1876 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1877
1878 /* Turn on RX list placement state machine */
1879 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1880
1881 /* Turn on RX list selector state machine. */
1882 if (!(BGE_IS_5705_PLUS(sc)))
1883 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1884
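	/*
	 * Assemble the MAC mode word: enable the TX/RX DMA engines and
	 * frame header DMA, clear and enable the MAC statistics, then
	 * select the port mode (TBI, GMII or MII) below.
	 */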
1885 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1886 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1887 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1888 BGE_MACMODE_FRMHDR_DMA_ENB;
1889
1890 if (sc->bge_flags & BGE_FLAG_TBI)
1891 val |= BGE_PORTMODE_TBI;
1892 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1893 val |= BGE_PORTMODE_GMII;
1894 else
1895 val |= BGE_PORTMODE_MII;
1896
1897 /* Turn on DMA, clear stats */
1898 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1899
1900 /* Set misc. local control, enable interrupts on attentions */
1901 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1902
1903#ifdef notdef
1904 /* Assert GPIO pins for PHY reset */
1905 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1906 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1907 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1908 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1909#endif
1910
1911 /* Turn on DMA completion state machine */
1912 if (!(BGE_IS_5705_PLUS(sc)))
1913 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1914
1915 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1916
1917 /* Enable host coalescing bug fix. */
1918 if (BGE_IS_5755_PLUS(sc))
1919 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1920
1921 /* Request larger DMA burst size to get better performance. */
1922 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
1923 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1924
1925 /* Turn on write DMA state machine */
1926 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1927 DELAY(40);
1928
1929 /* Turn on read DMA state machine */
1930 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1931 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1932 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1933 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1934 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1935 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1936 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1937 if (sc->bge_flags & BGE_FLAG_PCIE)
1938 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1939 if (sc->bge_flags & BGE_FLAG_TSO) {
1940 val |= BGE_RDMAMODE_TSO4_ENABLE;
1941 if (sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1942 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1943 val |= BGE_RDMAMODE_TSO6_ENABLE;
1944 }
1945 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
1946 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1947 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1948 sc->bge_asicrev == BGE_ASICREV_BCM57780) {
1949 /*
1950 * Enable fix for read DMA FIFO overruns.
1951 * The fix is to limit the number of RX BDs
1952		 * the hardware would fetch at a time.
1953 */
1954 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
1955 CSR_READ_4(sc, BGE_RDMA_RSRVCTRL) |
1956 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1957 }
1958 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1959 DELAY(40);
1960
1961 /* Turn on RX data completion state machine */
1962 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1963
1964 /* Turn on RX BD initiator state machine */
1965 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1966
1967 /* Turn on RX data and RX BD initiator state machine */
1968 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1969
1970 /* Turn on Mbuf cluster free state machine */
1971 if (!(BGE_IS_5705_PLUS(sc)))
1972 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1973
1974 /* Turn on send BD completion state machine */
1975 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1976
1977 /* Turn on send data completion state machine */
1978 val = BGE_SDCMODE_ENABLE;
1979 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
1980 val |= BGE_SDCMODE_CDELAY;
1981 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1982
1983 /* Turn on send data initiator state machine */
1984 if (sc->bge_flags & BGE_FLAG_TSO)
1985 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
1986 else
1987 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1988
1989 /* Turn on send BD initiator state machine */
1990 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1991
1992 /* Turn on send BD selector state machine */
1993 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1994
1995 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1996 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1997 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
1998
1999 /* ack/clear link change events */
2000 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2001 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2002 BGE_MACSTAT_LINK_CHANGED);
2003 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2004
2005 /*
2006 * Enable attention when the link has changed state for
2007 * devices that use auto polling.
2008 */
2009 if (sc->bge_flags & BGE_FLAG_TBI) {
2010 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2011 } else {
2012 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2013 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2014 DELAY(80);
2015 }
2016 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2017 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2018 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2019 BGE_EVTENB_MI_INTERRUPT);
2020 }
2021
2022 /*
2023 * Clear any pending link state attention.
2024 * Otherwise some link state change events may be lost until attention
2025 * is cleared by bge_intr() -> bge_link_upd() sequence.
2026 * It's not necessary on newer BCM chips - perhaps enabling link
2027 * state change attentions implies clearing pending attention.
2028 */
2029 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2030 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2031 BGE_MACSTAT_LINK_CHANGED);
2032
2033 /* Enable link state change attentions. */
2034 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2035
2036 return (0);
2037}
2038
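/*
 * Look up a human-readable name for a chip ID: first try an exact
 * chip ID match, then fall back to matching on the ASIC major
 * revision only.
 */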
2039const struct bge_revision *
2040bge_lookup_rev(uint32_t chipid)
2041{
2042 const struct bge_revision *br;
2043
2044 for (br = bge_revisions; br->br_name != NULL; br++) {
2045 if (br->br_chipid == chipid)
2046 return (br);
2047 }
2048
2049 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2050 if (br->br_chipid == BGE_ASICREV(chipid))
2051 return (br);
2052 }
2053
2054 return (NULL);
2055}
2056
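/*
 * Map a PCI vendor ID to its name.  We only expect to be called with
 * IDs from our own device list, so an unknown vendor is a bug.
 */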
2057const struct bge_vendor *
2058bge_lookup_vendor(uint16_t vid)
2059{
2060 const struct bge_vendor *v;
2061
2062 for (v = bge_vendors; v->v_name != NULL; v++)
2063 if (v->v_id == vid)
2064 return (v);
2065
2066 panic("%s: unknown vendor %d", __func__, vid);
2067 return (NULL);
2068}
2069
2070/*
2071 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2072 * against our list and return its name if we find a match.
2073 *
2074 * Note that since the Broadcom controller contains VPD support, we
2075 * try to get the device name string from the controller itself instead
2076 * of the compiled-in string. It guarantees we'll always announce the
2077 * right product name. We fall back to the compiled-in string when
2078 * VPD is unavailable or corrupt.
2079 */
2080static int
2081bge_probe(device_t dev)
2082{
2083 const struct bge_type *t = bge_devs;
2084 struct bge_softc *sc = device_get_softc(dev);
2085 uint16_t vid, did;
2086
2087 sc->bge_dev = dev;
2088 vid = pci_get_vendor(dev);
2089 did = pci_get_device(dev);
2090 while(t->bge_vid != 0) {
2091 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2092 char model[64], buf[96];
2093 const struct bge_revision *br;
2094 const struct bge_vendor *v;
2095 uint32_t id;
2096
2097 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2098 BGE_PCIMISCCTL_ASICREV_SHIFT;
2099 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG)
2100 id = pci_read_config(dev,
2101 BGE_PCI_PRODID_ASICREV, 4);
2102 br = bge_lookup_rev(id);
2103 v = bge_lookup_vendor(vid);
2104 {
2105#if __FreeBSD_version > 700024
2106 const char *pname;
2107
2108 if (bge_has_eaddr(sc) &&
2109 pci_get_vpd_ident(dev, &pname) == 0)
2110 snprintf(model, 64, "%s", pname);
2111 else
2112#endif
2113 snprintf(model, 64, "%s %s",
2114 v->v_name,
2115 br != NULL ? br->br_name :
2116 "NetXtreme Ethernet Controller");
2117 }
2118 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2119 br != NULL ? "" : "unknown ", id);
2120 device_set_desc_copy(dev, buf);
2121 return (0);
2122 }
2123 t++;
2124 }
2125
2126 return (ENXIO);
2127}
2128
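/*
 * Free all DMA resources: the per-buffer maps and mbuf tags, each
 * descriptor ring's map, memory and tag, and finally the buffer and
 * parent tags.
 */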
2129static void
2130bge_dma_free(struct bge_softc *sc)
2131{
2132 int i;
2133
2134 /* Destroy DMA maps for RX buffers. */
2135 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2136 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2137 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2138 sc->bge_cdata.bge_rx_std_dmamap[i]);
2139 }
2140 if (sc->bge_cdata.bge_rx_std_sparemap)
2141 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2142 sc->bge_cdata.bge_rx_std_sparemap);
2143
2144 /* Destroy DMA maps for jumbo RX buffers. */
2145 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2146 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2147 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2148 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2149 }
2150 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2151 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2152 sc->bge_cdata.bge_rx_jumbo_sparemap);
2153
2154 /* Destroy DMA maps for TX buffers. */
2155 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2156 if (sc->bge_cdata.bge_tx_dmamap[i])
2157 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2158 sc->bge_cdata.bge_tx_dmamap[i]);
2159 }
2160
2161 if (sc->bge_cdata.bge_rx_mtag)
2162 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2163 if (sc->bge_cdata.bge_tx_mtag)
2164 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2165
2166
2167 /* Destroy standard RX ring. */
2168 if (sc->bge_cdata.bge_rx_std_ring_map)
2169 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2170 sc->bge_cdata.bge_rx_std_ring_map);
2171 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2172 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2173 sc->bge_ldata.bge_rx_std_ring,
2174 sc->bge_cdata.bge_rx_std_ring_map);
2175
2176 if (sc->bge_cdata.bge_rx_std_ring_tag)
2177 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2178
2179 /* Destroy jumbo RX ring. */
2180 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2181 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2182 sc->bge_cdata.bge_rx_jumbo_ring_map);
2183
2184 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2185 sc->bge_ldata.bge_rx_jumbo_ring)
2186 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2187 sc->bge_ldata.bge_rx_jumbo_ring,
2188 sc->bge_cdata.bge_rx_jumbo_ring_map);
2189
2190 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2191 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2192
2193 /* Destroy RX return ring. */
2194 if (sc->bge_cdata.bge_rx_return_ring_map)
2195 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2196 sc->bge_cdata.bge_rx_return_ring_map);
2197
2198 if (sc->bge_cdata.bge_rx_return_ring_map &&
2199 sc->bge_ldata.bge_rx_return_ring)
2200 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2201 sc->bge_ldata.bge_rx_return_ring,
2202 sc->bge_cdata.bge_rx_return_ring_map);
2203
2204 if (sc->bge_cdata.bge_rx_return_ring_tag)
2205 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2206
2207 /* Destroy TX ring. */
2208 if (sc->bge_cdata.bge_tx_ring_map)
2209 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2210 sc->bge_cdata.bge_tx_ring_map);
2211
2212 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2213 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2214 sc->bge_ldata.bge_tx_ring,
2215 sc->bge_cdata.bge_tx_ring_map);
2216
2217 if (sc->bge_cdata.bge_tx_ring_tag)
2218 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2219
2220 /* Destroy status block. */
2221 if (sc->bge_cdata.bge_status_map)
2222 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2223 sc->bge_cdata.bge_status_map);
2224
2225 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2226 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2227 sc->bge_ldata.bge_status_block,
2228 sc->bge_cdata.bge_status_map);
2229
2230 if (sc->bge_cdata.bge_status_tag)
2231 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2232
2233 /* Destroy statistics block. */
2234 if (sc->bge_cdata.bge_stats_map)
2235 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2236 sc->bge_cdata.bge_stats_map);
2237
2238 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2239 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2240 sc->bge_ldata.bge_stats,
2241 sc->bge_cdata.bge_stats_map);
2242
2243 if (sc->bge_cdata.bge_stats_tag)
2244 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2245
2246 if (sc->bge_cdata.bge_buffer_tag)
2247 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2248
2249 /* Destroy the parent tag. */
2250 if (sc->bge_cdata.bge_parent_tag)
2251 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2252}
2253
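/*
 * Create a DMA tag for a descriptor ring, allocate and load DMA'able
 * memory for it and return its bus address.  If the controller has
 * the 4GB boundary bug and the ring happens to cross a 4GB boundary,
 * tear everything down and retry with the DMA address space limited
 * to 32 bits.
 */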
2254static int
2255bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2256 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2257 bus_addr_t *paddr, const char *msg)
2258{
2259 struct bge_dmamap_arg ctx;
2260 bus_addr_t lowaddr;
2261 bus_size_t ring_end;
2262 int error;
2263
2264 lowaddr = BUS_SPACE_MAXADDR;
2265again:
2266 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2267 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2268 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2269 if (error != 0) {
2270 device_printf(sc->bge_dev,
2271 "could not create %s dma tag\n", msg);
2272 return (ENOMEM);
2273 }
2274 /* Allocate DMA'able memory for ring. */
2275 error = bus_dmamem_alloc(*tag, (void **)ring,
2276 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2277 if (error != 0) {
2278 device_printf(sc->bge_dev,
2279 "could not allocate DMA'able memory for %s\n", msg);
2280 return (ENOMEM);
2281 }
2282 /* Load the address of the ring. */
2283 ctx.bge_busaddr = 0;
2284 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2285 &ctx, BUS_DMA_NOWAIT);
2286 if (error != 0) {
2287 device_printf(sc->bge_dev,
2288 "could not load DMA'able memory for %s\n", msg);
2289 return (ENOMEM);
2290 }
2291 *paddr = ctx.bge_busaddr;
2292 ring_end = *paddr + maxsize;
2293 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2294 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2295 /*
2296 * 4GB boundary crossed. Limit maximum allowable DMA
2297 * address space to 32bit and try again.
2298 */
2299 bus_dmamap_unload(*tag, *map);
2300 bus_dmamem_free(*tag, *ring, *map);
2301 bus_dma_tag_destroy(*tag);
2302 if (bootverbose)
2303 device_printf(sc->bge_dev, "4GB boundary crossed, "
2304 "limit DMA address space to 32bit for %s\n", msg);
2305 *ring = NULL;
2306 *tag = NULL;
2307 *map = NULL;
2308 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2309 goto again;
2310 }
2311 return (0);
2312}
2313
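/*
 * Allocate the parent DMA tag, the descriptor rings, the status and
 * statistics blocks, and the mbuf DMA tags and maps used for RX/TX
 * buffers.
 */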
2314static int
2315bge_dma_alloc(struct bge_softc *sc)
2316{
2317 bus_addr_t lowaddr;
2318 bus_size_t boundary, sbsz, txsegsz, txmaxsegsz;
2319 int i, error;
2320
2321 lowaddr = BUS_SPACE_MAXADDR;
2322 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2323 lowaddr = BGE_DMA_MAXADDR;
2324 /*
2325 * Allocate the parent bus DMA tag appropriate for PCI.
2326 */
2327 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2328 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2329 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2330 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2331 if (error != 0) {
2332 device_printf(sc->bge_dev,
2333 "could not allocate parent dma tag\n");
2334 return (ENOMEM);
2335 }
2336
2337 /* Create tag for standard RX ring. */
2338 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2339 &sc->bge_cdata.bge_rx_std_ring_tag,
2340 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2341 &sc->bge_cdata.bge_rx_std_ring_map,
2342 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2343 if (error)
2344 return (error);
2345
2346 /* Create tag for RX return ring. */
2347 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2348 &sc->bge_cdata.bge_rx_return_ring_tag,
2349 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2350 &sc->bge_cdata.bge_rx_return_ring_map,
2351 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2352 if (error)
2353 return (error);
2354
2355 /* Create tag for TX ring. */
2356 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2357 &sc->bge_cdata.bge_tx_ring_tag,
2358 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2359 &sc->bge_cdata.bge_tx_ring_map,
2360 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2361 if (error)
2362 return (error);
2363
2364 /*
2365 * Create tag for status block.
2366	 * Because we use only a single TX/RX/RX return ring, use the
2367	 * minimum status block size, except on BCM5700 AX/BX, which
2368	 * seem to want to see the full status block size regardless
2369	 * of the configured number of rings.
2370 */
2371 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2372 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2373 sbsz = BGE_STATUS_BLK_SZ;
2374 else
2375 sbsz = 32;
2376 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2377 &sc->bge_cdata.bge_status_tag,
2378 (uint8_t **)&sc->bge_ldata.bge_status_block,
2379 &sc->bge_cdata.bge_status_map,
2380 &sc->bge_ldata.bge_status_block_paddr, "status block");
2381 if (error)
2382 return (error);
2383
2384 /* Create tag for statistics block. */
2385 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2386 &sc->bge_cdata.bge_stats_tag,
2387 (uint8_t **)&sc->bge_ldata.bge_stats,
2388 &sc->bge_cdata.bge_stats_map,
2389 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2390 if (error)
2391 return (error);
2392
2393 /* Create tag for jumbo RX ring. */
2394 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2395 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2396 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2397 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2398 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2399 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2400 if (error)
2401 return (error);
2402 }
2403
2404 /* Create parent tag for buffers. */
2405 boundary = 0;
2406 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0)
2407 boundary = BGE_DMA_BNDRY;
2408 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2409 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2410 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2411 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2412 if (error != 0) {
2413 device_printf(sc->bge_dev,
2414 "could not allocate buffer dma tag\n");
2415 return (ENOMEM);
2416 }
2417 /* Create tag for Tx mbufs. */
2418 if (sc->bge_flags & BGE_FLAG_TSO) {
2419 txsegsz = BGE_TSOSEG_SZ;
2420 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2421 } else {
2422 txsegsz = MCLBYTES;
2423 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2424 }
2425 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2426 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2427 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2428 &sc->bge_cdata.bge_tx_mtag);
2429
2430 if (error) {
2431 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2432 return (ENOMEM);
2433 }
2434
2435 /* Create tag for Rx mbufs. */
2436 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2437 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
2438 MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2439
2440 if (error) {
2441 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2442 return (ENOMEM);
2443 }
2444
2445 /* Create DMA maps for RX buffers. */
2446 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2447 &sc->bge_cdata.bge_rx_std_sparemap);
2448 if (error) {
2449 device_printf(sc->bge_dev,
2450 "can't create spare DMA map for RX\n");
2451 return (ENOMEM);
2452 }
2453 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2454 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2455 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2456 if (error) {
2457 device_printf(sc->bge_dev,
2458 "can't create DMA map for RX\n");
2459 return (ENOMEM);
2460 }
2461 }
2462
2463 /* Create DMA maps for TX buffers. */
2464 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2465 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2466 &sc->bge_cdata.bge_tx_dmamap[i]);
2467 if (error) {
2468 device_printf(sc->bge_dev,
2469 "can't create DMA map for TX\n");
2470 return (ENOMEM);
2471 }
2472 }
2473
2474 /* Create tags for jumbo RX buffers. */
2475 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2476 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2477 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2478 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2479 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2480 if (error) {
2481 device_printf(sc->bge_dev,
2482 "could not allocate jumbo dma tag\n");
2483 return (ENOMEM);
2484 }
2485 /* Create DMA maps for jumbo RX buffers. */
2486 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2487 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2488 if (error) {
2489 device_printf(sc->bge_dev,
2490 "can't create spare DMA map for jumbo RX\n");
2491 return (ENOMEM);
2492 }
2493 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2494 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2495 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2496 if (error) {
2497 device_printf(sc->bge_dev,
2498 "can't create DMA map for jumbo RX\n");
2499 return (ENOMEM);
2500 }
2501 }
2502 }
2503
2504 return (0);
2505}
2506
2507/*
2508 * Return true if this device has more than one port.
2509 */
2510static int
2511bge_has_multiple_ports(struct bge_softc *sc)
2512{
2513 device_t dev = sc->bge_dev;
2514 u_int b, d, f, fscan, s;
2515
2516 d = pci_get_domain(dev);
2517 b = pci_get_bus(dev);
2518 s = pci_get_slot(dev);
2519 f = pci_get_function(dev);
2520 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2521 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2522 return (1);
2523 return (0);
2524}
2525
2526/*
2527 * Return true if MSI can be used with this device.
2528 */
2529static int
2530bge_can_use_msi(struct bge_softc *sc)
2531{
2532 int can_use_msi = 0;
2533
2534 switch (sc->bge_asicrev) {
2535 case BGE_ASICREV_BCM5714_A0:
2536 case BGE_ASICREV_BCM5714:
2537 /*
2538 * Apparently, MSI doesn't work when these chips are
2539 * configured in single-port mode.
2540 */
2541 if (bge_has_multiple_ports(sc))
2542 can_use_msi = 1;
2543 break;
2544 case BGE_ASICREV_BCM5750:
2545 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2546 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2547 can_use_msi = 1;
2548 break;
2549 default:
2550 if (BGE_IS_575X_PLUS(sc))
2551 can_use_msi = 1;
2552 }
2553 return (can_use_msi);
2554}
2555
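/*
 * Attach routine: map the controller registers, identify the chip and
 * work around its known bugs, allocate the interrupt and DMA
 * resources, probe the PHY (or set up TBI media) and register the
 * network interface.
 */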
2556static int
2557bge_attach(device_t dev)
2558{
2559 struct ifnet *ifp;
2560 struct bge_softc *sc;
2561 uint32_t hwcfg = 0, misccfg;
2562 u_char eaddr[ETHER_ADDR_LEN];
2563 int error, msicount, phy_addr, reg, rid, trys;
2564
2565 sc = device_get_softc(dev);
2566 sc->bge_dev = dev;
2567
2568 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2569
2570 /*
2571 * Map control/status registers.
2572 */
2573 pci_enable_busmaster(dev);
2574
2575 rid = PCIR_BAR(0);
2576 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2577 RF_ACTIVE);
2578
2579 if (sc->bge_res == NULL) {
2580 device_printf (sc->bge_dev, "couldn't map memory\n");
2581 error = ENXIO;
2582 goto fail;
2583 }
2584
2585 /* Save various chip information. */
2586 sc->bge_chipid =
2587 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2588 BGE_PCIMISCCTL_ASICREV_SHIFT;
2589 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG)
2590 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV,
2591 4);
2592 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2593 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2594
2595 /* Set default PHY address. */
2596 phy_addr = 1;
2597
2598 /*
2599 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2600 * 5705 A0 and A1 chips.
2601 */
2602 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2603 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2604 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2605 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
2606 sc->bge_phy_flags |= BGE_PHY_WIRESPEED;
2607
2608 if (bge_has_eaddr(sc))
2609 sc->bge_flags |= BGE_FLAG_EADDR;
2610
2611 /* Save chipset family. */
2612 switch (sc->bge_asicrev) {
2613 case BGE_ASICREV_BCM5755:
2614 case BGE_ASICREV_BCM5761:
2615 case BGE_ASICREV_BCM5784:
2616 case BGE_ASICREV_BCM5785:
2617 case BGE_ASICREV_BCM5787:
2618 case BGE_ASICREV_BCM57780:
2619 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2620 BGE_FLAG_5705_PLUS;
2621 break;
2622 case BGE_ASICREV_BCM5700:
2623 case BGE_ASICREV_BCM5701:
2624 case BGE_ASICREV_BCM5703:
2625 case BGE_ASICREV_BCM5704:
2626 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2627 break;
2628 case BGE_ASICREV_BCM5714_A0:
2629 case BGE_ASICREV_BCM5780:
2630 case BGE_ASICREV_BCM5714:
2631 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2632 /* FALLTHROUGH */
2633 case BGE_ASICREV_BCM5750:
2634 case BGE_ASICREV_BCM5752:
2635 case BGE_ASICREV_BCM5906:
2636 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2637 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
2638 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2639 /* FALLTHROUGH */
2640 case BGE_ASICREV_BCM5705:
2641 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2642 break;
2643 }
2644
2645 /* Set various PHY bug flags. */
2646 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2647 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2648 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2649 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2650 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2651 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2652 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2653 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2654 if (pci_get_subvendor(dev) == DELL_VENDORID)
2655 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2656 if ((BGE_IS_5705_PLUS(sc)) &&
2657 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2658 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2659 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2660 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2661 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2662 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2663 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2664 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2665 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2666 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2667 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2668 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2669 } else
2670 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2671 }
2672
2673	/* Identify the chips that use a CPMU. */
2674 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2675 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2676 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2677 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2678 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2679 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2680 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2681 else
2682 sc->bge_mi_mode = BGE_MIMODE_BASE;
2683 /* Enable auto polling for BCM570[0-5]. */
2684 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2685 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2686
2687 /*
2688	 * All controllers that are not 5755 or higher have the 4GB
2689	 * boundary DMA bug.
2690	 * Whenever an address crosses a multiple of the 4GB boundary
2691	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2692	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2693	 * state machine will lock up and cause the device to hang.
2694 */
2695 if (BGE_IS_5755_PLUS(sc) == 0)
2696 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2697
2698 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2699 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2700 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2701 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2702 sc->bge_flags |= BGE_FLAG_5788;
2703 }
2704
2705 /*
2706	 * Some controllers seem to require special firmware to use
2707	 * TSO. But that firmware is not available to FreeBSD, and Linux
2708	 * claims that TSO performed by the firmware is slower than
2709	 * hardware-based TSO. Moreover, the firmware-based TSO has a
2710	 * known bug: it can't handle TSO if the ethernet header + IP/TCP
2711	 * header is greater than 80 bytes. A workaround for the TSO
2712	 * bug exists, but it seems more expensive than not using
2713	 * TSO at all. Some hardware also has the TSO bug, so limit
2714	 * TSO to the controllers that are not affected by TSO issues
2715	 * (e.g. 5755 or higher).
2716 */
2717 if (BGE_IS_5755_PLUS(sc)) {
2718 /*
2719		 * BCM5754 and BCM5787 share the same ASIC ID, so an
2720		 * explicit device ID check is required.
2721		 * For an unknown reason, TSO does not work on the BCM5755M.
2722 */
2723 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2724 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2725 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
2726 sc->bge_flags |= BGE_FLAG_TSO;
2727 }
2728
2729 /*
2730 * Check if this is a PCI-X or PCI Express device.
2731 */
2732 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2733 /*
2734 * Found a PCI Express capabilities register, this
2735 * must be a PCI Express device.
2736 */
2737 sc->bge_flags |= BGE_FLAG_PCIE;
2738 sc->bge_expcap = reg;
2739 if (pci_get_max_read_req(dev) != 4096)
2740 pci_set_max_read_req(dev, 4096);
2741 } else {
2742 /*
2743 * Check if the device is in PCI-X Mode.
2744 * (This bit is not valid on PCI Express controllers.)
2745 */
2746 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
2747 sc->bge_pcixcap = reg;
2748 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2749 BGE_PCISTATE_PCI_BUSMODE) == 0)
2750 sc->bge_flags |= BGE_FLAG_PCIX;
2751 }
2752
2753 /*
2754 * The 40bit DMA bug applies to the 5714/5715 controllers and is
2755 * not actually a MAC controller bug but an issue with the embedded
2756 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
2757 */
2758 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2759 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2760 /*
2761 * Allocate the interrupt, using MSI if possible. These devices
2762 * support 8 MSI messages, but only the first one is used in
2763 * normal operation.
2764 */
2765 rid = 0;
2766 if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
2767 sc->bge_msicap = reg;
2768 if (bge_can_use_msi(sc)) {
2769 msicount = pci_msi_count(dev);
2770 if (msicount > 1)
2771 msicount = 1;
2772 } else
2773 msicount = 0;
2774 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2775 rid = 1;
2776 sc->bge_flags |= BGE_FLAG_MSI;
2777 }
2778 }
2779
2780 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2781 RF_SHAREABLE | RF_ACTIVE);
2782
2783 if (sc->bge_irq == NULL) {
2784 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2785 error = ENXIO;
2786 goto fail;
2787 }
2788
2789 device_printf(dev,
2790 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2791 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2792 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2793 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2794
2795 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2796
2797 /* Try to reset the chip. */
2798 if (bge_reset(sc)) {
2799 device_printf(sc->bge_dev, "chip reset failed\n");
2800 error = ENXIO;
2801 goto fail;
2802 }
2803
2804 sc->bge_asf_mode = 0;
2805 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2806 == BGE_MAGIC_NUMBER)) {
2807 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2808 & BGE_HWCFG_ASF) {
2809 sc->bge_asf_mode |= ASF_ENABLE;
2810 sc->bge_asf_mode |= ASF_STACKUP;
2811 if (BGE_IS_575X_PLUS(sc))
2812 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2813 }
2814 }
2815
2816 /* Try to reset the chip again the nice way. */
2817 bge_stop_fw(sc);
2818 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2819 if (bge_reset(sc)) {
2820 device_printf(sc->bge_dev, "chip reset failed\n");
2821 error = ENXIO;
2822 goto fail;
2823 }
2824
2825 bge_sig_legacy(sc, BGE_RESET_STOP);
2826 bge_sig_post_reset(sc, BGE_RESET_STOP);
2827
2828 if (bge_chipinit(sc)) {
2829 device_printf(sc->bge_dev, "chip initialization failed\n");
2830 error = ENXIO;
2831 goto fail;
2832 }
2833
2834 error = bge_get_eaddr(sc, eaddr);
2835 if (error) {
2836 device_printf(sc->bge_dev,
2837 "failed to read station address\n");
2838 error = ENXIO;
2839 goto fail;
2840 }
2841
2842 /* 5705 limits RX return ring to 512 entries. */
2843 if (BGE_IS_5705_PLUS(sc))
2844 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2845 else
2846 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2847
2848 if (bge_dma_alloc(sc)) {
2849 device_printf(sc->bge_dev,
2850 "failed to allocate DMA resources\n");
2851 error = ENXIO;
2852 goto fail;
2853 }
2854
2855 bge_add_sysctls(sc);
2856
2857 /* Set default tuneable values. */
2858 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2859 sc->bge_rx_coal_ticks = 150;
2860 sc->bge_tx_coal_ticks = 150;
2861 sc->bge_rx_max_coal_bds = 10;
2862 sc->bge_tx_max_coal_bds = 10;
2863
2864 /* Initialize checksum features to use. */
2865 sc->bge_csum_features = BGE_CSUM_FEATURES;
2866 if (sc->bge_forced_udpcsum != 0)
2867 sc->bge_csum_features |= CSUM_UDP;
2868
2869 /* Set up ifnet structure */
2870 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2871 if (ifp == NULL) {
2872 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2873 error = ENXIO;
2874 goto fail;
2875 }
2876 ifp->if_softc = sc;
2877 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2878 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2879 ifp->if_ioctl = bge_ioctl;
2880 ifp->if_start = bge_start;
2881 ifp->if_init = bge_init;
2882 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2883 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2884 IFQ_SET_READY(&ifp->if_snd);
2885 ifp->if_hwassist = sc->bge_csum_features;
2886 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2887 IFCAP_VLAN_MTU;
2888 if ((sc->bge_flags & BGE_FLAG_TSO) != 0) {
2889 ifp->if_hwassist |= CSUM_TSO;
2890 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
2891 }
2892#ifdef IFCAP_VLAN_HWCSUM
2893 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2894#endif
2895 ifp->if_capenable = ifp->if_capabilities;
2896#ifdef DEVICE_POLLING
2897 ifp->if_capabilities |= IFCAP_POLLING;
2898#endif
2899
2900 /*
2901 * 5700 B0 chips do not support checksumming correctly due
2902 * to hardware bugs.
2903 */
2904 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2905 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2906 ifp->if_capenable &= ~IFCAP_HWCSUM;
2907 ifp->if_hwassist = 0;
2908 }
2909
2910 /*
2911 * Figure out what sort of media we have by checking the
2912 * hardware config word in the first 32k of NIC internal memory,
2913 * or fall back to examining the EEPROM if necessary.
2914 * Note: on some BCM5700 cards, this value appears to be unset.
2915 * If that's the case, we have to rely on identifying the NIC
2916 * by its PCI subsystem ID, as we do below for the SysKonnect
2917 * SK-9D41.
2918 */
2919 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2920 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2921 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
2922 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2923 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2924 sizeof(hwcfg))) {
2925 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2926 error = ENXIO;
2927 goto fail;
2928 }
2929 hwcfg = ntohl(hwcfg);
2930 }
2931
2932 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2933 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
2934 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2935 if (BGE_IS_5714_FAMILY(sc))
2936 sc->bge_flags |= BGE_FLAG_MII_SERDES;
2937 else
2938 sc->bge_flags |= BGE_FLAG_TBI;
2939 }
2940
2941 if (sc->bge_flags & BGE_FLAG_TBI) {
2942 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2943 bge_ifmedia_sts);
2944 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
2945 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2946 0, NULL);
2947 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
2948 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2949 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2950 } else {
2951 /*
2952 * Do transceiver setup and tell the firmware the
2953		 * driver is down so we can try to get access to
2954		 * probe the PHY if ASF is running. Retry a couple of times
2955 * if we get a conflict with the ASF firmware accessing
2956 * the PHY.
2957 */
2958 trys = 0;
2959 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2960again:
2961 bge_asf_driver_up(sc);
2962
2963 error = (mii_attach(dev, &sc->bge_miibus, ifp,
2964 bge_ifmedia_upd, bge_ifmedia_sts, BMSR_DEFCAPMASK,
2965 phy_addr, MII_OFFSET_ANY, 0));
2966 if (error != 0) {
2967 if (trys++ < 4) {
2968 device_printf(sc->bge_dev, "Try again\n");
2969 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
2970 BMCR_RESET);
2971 goto again;
2972 }
2973 device_printf(sc->bge_dev, "attaching PHYs failed\n");
2974 goto fail;
2975 }
2976
2977 /*
2978 * Now tell the firmware we are going up after probing the PHY
2979 */
2980 if (sc->bge_asf_mode & ASF_STACKUP)
2981 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2982 }
2983
2984 /*
2985 * When using the BCM5701 in PCI-X mode, data corruption has
2986 * been observed in the first few bytes of some received packets.
2987 * Aligning the packet buffer in memory eliminates the corruption.
2988 * Unfortunately, this misaligns the packet payloads. On platforms
2989 * which do not support unaligned accesses, we will realign the
2990 * payloads by copying the received packets.
2991 */
2992 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2993 sc->bge_flags & BGE_FLAG_PCIX)
2994 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2995
2996 /*
2997 * Call MI attach routine.
2998 */
2999 ether_ifattach(ifp, eaddr);
3000 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3001
3002 /* Tell upper layer we support long frames. */
3003 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3004
3005 /*
3006 * Hookup IRQ last.
3007 */
3008#if __FreeBSD_version > 700030
3009 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3010 /* Take advantage of single-shot MSI. */
3011 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3012 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3013 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3014 taskqueue_thread_enqueue, &sc->bge_tq);
3015 if (sc->bge_tq == NULL) {
3016 device_printf(dev, "could not create taskqueue.\n");
3017 ether_ifdetach(ifp);
3018 error = ENXIO;
3019 goto fail;
3020 }
3021 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3022 device_get_nameunit(sc->bge_dev));
3023 error = bus_setup_intr(dev, sc->bge_irq,
3024 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3025 &sc->bge_intrhand);
3026 if (error)
3027 ether_ifdetach(ifp);
3028 } else
3029 error = bus_setup_intr(dev, sc->bge_irq,
3030 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3031 &sc->bge_intrhand);
3032#else
3033 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
3034 bge_intr, sc, &sc->bge_intrhand);
3035#endif
3036
3037 if (error) {
3038 bge_detach(dev);
3039 device_printf(sc->bge_dev, "couldn't set up irq\n");
3040 }
3041
3042 return (0);
3043
3044fail:
3045 bge_release_resources(sc);
3046
3047 return (error);
3048}
3049
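/*
 * Detach routine: stop the controller, drain the callout and the
 * interrupt taskqueue, detach the interface and the PHY (or remove
 * the TBI ifmedia), then release all resources.
 */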
3050static int
3051bge_detach(device_t dev)
3052{
3053 struct bge_softc *sc;
3054 struct ifnet *ifp;
3055
3056 sc = device_get_softc(dev);
3057 ifp = sc->bge_ifp;
3058
3059#ifdef DEVICE_POLLING
3060 if (ifp->if_capenable & IFCAP_POLLING)
3061 ether_poll_deregister(ifp);
3062#endif
3063
3064 BGE_LOCK(sc);
3065 bge_stop(sc);
3066 bge_reset(sc);
3067 BGE_UNLOCK(sc);
3068
3069 callout_drain(&sc->bge_stat_ch);
3070
3071 if (sc->bge_tq)
3072 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3073 ether_ifdetach(ifp);
3074
3075 if (sc->bge_flags & BGE_FLAG_TBI) {
3076 ifmedia_removeall(&sc->bge_ifmedia);
3077 } else {
3078 bus_generic_detach(dev);
3079 device_delete_child(dev, sc->bge_miibus);
3080 }
3081
3082 bge_release_resources(sc);
3083
3084 return (0);
3085}
3086
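/*
 * Release the taskqueue, interrupt, MSI, register window, ifnet and
 * DMA resources acquired during attach.  Each resource is checked
 * before release, so this is safe to call on a partially attached
 * device.
 */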
3087static void
3088bge_release_resources(struct bge_softc *sc)
3089{
3090 device_t dev;
3091
3092 dev = sc->bge_dev;
3093
3094 if (sc->bge_tq != NULL)
3095 taskqueue_free(sc->bge_tq);
3096
3097 if (sc->bge_intrhand != NULL)
3098 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3099
3100 if (sc->bge_irq != NULL)
3101 bus_release_resource(dev, SYS_RES_IRQ,
3102 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3103
3104 if (sc->bge_flags & BGE_FLAG_MSI)
3105 pci_release_msi(dev);
3106
3107 if (sc->bge_res != NULL)
3108 bus_release_resource(dev, SYS_RES_MEMORY,
3109 PCIR_BAR(0), sc->bge_res);
3110
3111 if (sc->bge_ifp != NULL)
3112 if_free(sc->bge_ifp);
3113
3114 bge_dma_free(sc);
3115
3116 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3117 BGE_LOCK_DESTROY(sc);
3118}
3119
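/*
 * Issue a global reset of the controller, restore the PCI
 * configuration state that the reset clobbers, and wait for the
 * bootcode/firmware handshake to complete before returning.
 */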
3120static int
3121bge_reset(struct bge_softc *sc)
3122{
3123 device_t dev;
3124 uint32_t cachesize, command, pcistate, reset, val;
3125 void (*write_op)(struct bge_softc *, int, int);
3126 uint16_t devctl;
3127 int i;
3128
3129 dev = sc->bge_dev;
3130
3131 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3132 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3133 if (sc->bge_flags & BGE_FLAG_PCIE)
3134 write_op = bge_writemem_direct;
3135 else
3136 write_op = bge_writemem_ind;
3137 } else
3138 write_op = bge_writereg_ind;
3139
3140 /* Save some important PCI state. */
3141 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3142 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3143 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3144
3145 pci_write_config(dev, BGE_PCI_MISC_CTL,
3146 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3147 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3148
3149 /* Disable fastboot on controllers that support it. */
3150 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3151 BGE_IS_5755_PLUS(sc)) {
3152 if (bootverbose)
3153 device_printf(dev, "Disabling fastboot\n");
3154 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3155 }
3156
3157 /*
3158 * Write the magic number to SRAM at offset 0xB50.
3159 * When firmware finishes its initialization it will
3160 * write ~BGE_MAGIC_NUMBER to the same location.
3161 */
3162 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3163
3164 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3165
3166 /* XXX: Broadcom Linux driver. */
3167 if (sc->bge_flags & BGE_FLAG_PCIE) {
3168 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3169 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3170 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3171 /* Prevent PCIE link training during global reset */
3172 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3173 reset |= 1 << 29;
3174 }
3175 }
3176
3177 /*
3178 * Set GPHY Power Down Override to leave GPHY
3179 * powered up in D0 uninitialized.
3180 */
3181 if (BGE_IS_5705_PLUS(sc))
3182 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3183
3184 /* Issue global reset */
3185 write_op(sc, BGE_MISC_CFG, reset);
3186
3187 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3188 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3189 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3190 val | BGE_VCPU_STATUS_DRV_RESET);
3191 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3192 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3193 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3194 }
3195
3196 DELAY(1000);
3197
3198 /* XXX: Broadcom Linux driver. */
3199 if (sc->bge_flags & BGE_FLAG_PCIE) {
3200 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3201 DELAY(500000); /* wait for link training to complete */
3202 val = pci_read_config(dev, 0xC4, 4);
3203 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3204 }
3205 devctl = pci_read_config(dev,
3206 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3207 /* Clear enable no snoop and disable relaxed ordering. */
3208 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3209 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3210 /* Set PCIE max payload size to 128. */
3211 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3212 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3213 devctl, 2);
3214 /* Clear error status. */
3215 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3216 PCIM_EXP_STA_CORRECTABLE_ERROR |
3217 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3218 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3219 }
3220
3221 /* Reset some of the PCI state that got zapped by reset. */
3222 pci_write_config(dev, BGE_PCI_MISC_CTL,
3223 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3224 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3225 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3226 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3227 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3228 /*
3229 * Disable PCI-X relaxed ordering to ensure status block update
3230	 * comes first, then packet buffer DMA. Otherwise the driver may
3231	 * read a stale status block.
3232 */
3233 if (sc->bge_flags & BGE_FLAG_PCIX) {
3234 devctl = pci_read_config(dev,
3235 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3236 devctl &= ~PCIXM_COMMAND_ERO;
3237 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3238 devctl &= ~PCIXM_COMMAND_MAX_READ;
3239 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3240 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3241 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3242 PCIXM_COMMAND_MAX_READ);
3243 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3244 }
3245 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3246 devctl, 2);
3247 }
3248	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
3249 if (BGE_IS_5714_FAMILY(sc)) {
3250 /* This chip disables MSI on reset. */
3251 if (sc->bge_flags & BGE_FLAG_MSI) {
3252 val = pci_read_config(dev,
3253 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3254 pci_write_config(dev,
3255 sc->bge_msicap + PCIR_MSI_CTRL,
3256 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3257 val = CSR_READ_4(sc, BGE_MSI_MODE);
3258 CSR_WRITE_4(sc, BGE_MSI_MODE,
3259 val | BGE_MSIMODE_ENABLE);
3260 }
3261 val = CSR_READ_4(sc, BGE_MARB_MODE);
3262 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3263 } else
3264 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3265
3266 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3267 for (i = 0; i < BGE_TIMEOUT; i++) {
3268 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3269 if (val & BGE_VCPU_STATUS_INIT_DONE)
3270 break;
3271 DELAY(100);
3272 }
3273 if (i == BGE_TIMEOUT) {
3274 device_printf(dev, "reset timed out\n");
3275 return (1);
3276 }
3277 } else {
3278 /*
3279 * Poll until we see the 1's complement of the magic number.
3280 * This indicates that the firmware initialization is complete.
3281 * We expect this to fail if no chip containing the Ethernet
3282 * address is fitted though.
3283 */
3284 for (i = 0; i < BGE_TIMEOUT; i++) {
3285 DELAY(10);
3286 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3287 if (val == ~BGE_MAGIC_NUMBER)
3288 break;
3289 }
3290
3291 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3292 device_printf(dev,
3293 "firmware handshake timed out, found 0x%08x\n",
3294 val);
3295 }
3296
3297 /*
3298 * XXX Wait for the value of the PCISTATE register to
3299 * return to its original pre-reset state. This is a
3300 * fairly good indicator of reset completion. If we don't
3301 * wait for the reset to fully complete, trying to read
3302 * from the device's non-PCI registers may yield garbage
3303 * results.
3304 */
3305 for (i = 0; i < BGE_TIMEOUT; i++) {
3306 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3307 break;
3308 DELAY(10);
3309 }
3310
3311 /* Fix up byte swapping. */
3312 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3313 BGE_MODECTL_BYTESWAP_DATA);
3314
3315 /* Tell the ASF firmware we are up */
3316 if (sc->bge_asf_mode & ASF_STACKUP)
3317 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3318
3319 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3320
3321 /*
3322 * The 5704 in TBI mode apparently needs some special
3323	 * adjustment to ensure the SERDES drive level is set
3324 * to 1.2V.
3325 */
3326 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3327 sc->bge_flags & BGE_FLAG_TBI) {
3328 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3329 val = (val & ~0xFFF) | 0x880;
3330 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3331 }
3332
3333 /* XXX: Broadcom Linux driver. */
3334 if (sc->bge_flags & BGE_FLAG_PCIE &&
3335 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3336 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3337 /* Enable Data FIFO protection. */
3338 val = CSR_READ_4(sc, 0x7C00);
3339 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3340 }
3341 DELAY(10000);
3342
3343 return (0);
3344}
3345
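/*
 * Recycle the mbuf currently attached to a standard RX slot by
 * reinitializing its descriptor in the producer ring.  Used when a
 * replacement buffer can't be allocated or the frame had an error.
 */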
3346static __inline void
3347bge_rxreuse_std(struct bge_softc *sc, int i)
3348{
3349 struct bge_rx_bd *r;
3350
3351 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3352 r->bge_flags = BGE_RXBDFLAG_END;
3353 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3354 r->bge_idx = i;
3355 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3356}
3357
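/* Same as bge_rxreuse_std(), but for the extended jumbo RX ring. */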
3358static __inline void
3359bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3360{
3361 struct bge_extrx_bd *r;
3362
3363 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3364 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3365 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3366 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3367 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3368 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3369 r->bge_idx = i;
3370 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3371}
3372
3373/*
3374 * Frame reception handling. This is called if there's a frame
3375 * on the receive return list.
3376 *
3377 * Note: we have to be able to handle two possibilities here:
3378 * 1) the frame is from the jumbo receive ring
3379 * 2) the frame is from the standard receive ring
3380 */
3381
3382static int
3383bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3384{
3385 struct ifnet *ifp;
3386 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3387 uint16_t rx_cons;
3388
3389 rx_cons = sc->bge_rx_saved_considx;
3390
3391 /* Nothing to do. */
3392 if (rx_cons == rx_prod)
3393 return (rx_npkts);
3394
3395 ifp = sc->bge_ifp;
3396
3397 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3398 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3399 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3400 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
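	/*
	 * The jumbo ring only needs to be synced when the configured MTU
	 * can produce frames larger than a standard mbuf cluster, i.e.
	 * when jumbo receive buffers are actually in use.
	 */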
3401 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3402 (MCLBYTES - ETHER_ALIGN))
3403 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3404 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3405
3406 while (rx_cons != rx_prod) {
3407 struct bge_rx_bd *cur_rx;
3408 uint32_t rxidx;
3409 struct mbuf *m = NULL;
3410 uint16_t vlan_tag = 0;
3411 int have_tag = 0;
3412
3413#ifdef DEVICE_POLLING
3414 if (ifp->if_capenable & IFCAP_POLLING) {
3415 if (sc->rxcycles <= 0)
3416 break;
3417 sc->rxcycles--;
3418 }
3419#endif
3420
3421 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3422
3423 rxidx = cur_rx->bge_idx;
3424 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3425
3426 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3427 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3428 have_tag = 1;
3429 vlan_tag = cur_rx->bge_vlan_tag;
3430 }
3431
3432 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3433 jumbocnt++;
3434 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3435 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3436 bge_rxreuse_jumbo(sc, rxidx);
3437 continue;
3438 }
3439 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3440 bge_rxreuse_jumbo(sc, rxidx);
3441 ifp->if_iqdrops++;
3442 continue;
3443 }
3444 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3445 } else {
3446 stdcnt++;
3447 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3448 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3449 bge_rxreuse_std(sc, rxidx);
3450 continue;
3451 }
3452 if (bge_newbuf_std(sc, rxidx) != 0) {
3453 bge_rxreuse_std(sc, rxidx);
3454 ifp->if_iqdrops++;
3455 continue;
3456 }
3457 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3458 }
3459
3460 ifp->if_ipackets++;
3461#ifndef __NO_STRICT_ALIGNMENT
3462 /*
3463 * For architectures with strict alignment we must make sure
3464 * the payload is aligned.
3465 */
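		/*
		 * Shifting by ETHER_ALIGN (2 bytes) leaves the IP header
		 * 32-bit aligned.  (The buffer was presumably loaded without
		 * the usual ETHER_ALIGN offset because chips flagged with
		 * BGE_FLAG_RX_ALIGNBUG cannot DMA to unaligned addresses.)
		 */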
3466 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3467 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3468 cur_rx->bge_len);
3469 m->m_data += ETHER_ALIGN;
3470 }
3471#endif
3472 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3473 m->m_pkthdr.rcvif = ifp;
3474
3475 if (ifp->if_capenable & IFCAP_RXCSUM) {
3476 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3477 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3478 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3479 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3480 }
3481 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3482 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3483 m->m_pkthdr.csum_data =
3484 cur_rx->bge_tcp_udp_csum;
3485 m->m_pkthdr.csum_flags |=
3486 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
3487 }
3488 }
3489
3490 /*
3491 * If we received a packet with a vlan tag,
3492 * attach that information to the packet.
3493 */
3494 if (have_tag) {
3495#if __FreeBSD_version > 700022
3496 m->m_pkthdr.ether_vtag = vlan_tag;
3497 m->m_flags |= M_VLANTAG;
3498#else
3499 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3500 if (m == NULL)
3501 continue;
3502#endif
3503 }
3504
3505 if (holdlck != 0) {
3506 BGE_UNLOCK(sc);
3507 (*ifp->if_input)(ifp, m);
3508 BGE_LOCK(sc);
3509 } else
3510 (*ifp->if_input)(ifp, m);
3511 rx_npkts++;
3512
3513 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3514 return (rx_npkts);
3515 }
3516
3517 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3518 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3519 if (stdcnt > 0)
3520 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3521 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3522
3523 if (jumbocnt > 0)
3524 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3525 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3526
3527 sc->bge_rx_saved_considx = rx_cons;
3528 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
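	/*
	 * The producer mailboxes below take the index of the last
	 * descriptor the driver has initialized, hence the
	 * (producer + ring size - 1) % ring size arithmetic.
	 */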
3529 if (stdcnt)
3530 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3531 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3532 if (jumbocnt)
3533 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3534 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3535#ifdef notyet
3536 /*
3537 * This register wraps very quickly under heavy packet drops.
3538 * If you need correct statistics, you can enable this check.
3539 */
3540 if (BGE_IS_5705_PLUS(sc))
3541 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3542#endif
3543 return (rx_npkts);
3544}
3545
3546static void
3547bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3548{
3549 struct bge_tx_bd *cur_tx;
3550 struct ifnet *ifp;
3551
3552 BGE_LOCK_ASSERT(sc);
3553
3554 /* Nothing to do. */
3555 if (sc->bge_tx_saved_considx == tx_cons)
3556 return;
3557
3558 ifp = sc->bge_ifp;
3559
3560 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3561 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3562 /*
3563 * Go through our tx ring and free mbufs for those
3564 * frames that have been sent.
3565 */
3566 while (sc->bge_tx_saved_considx != tx_cons) {
3567 uint32_t idx;
3568
3569 idx = sc->bge_tx_saved_considx;
3570 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3571 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3572 ifp->if_opackets++;
3573 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3574 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3575 sc->bge_cdata.bge_tx_dmamap[idx],
3576 BUS_DMASYNC_POSTWRITE);
3577 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3578 sc->bge_cdata.bge_tx_dmamap[idx]);
3579 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3580 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3581 }
3582 sc->bge_txcnt--;
3583 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3584 }
3585
3586 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3587 if (sc->bge_txcnt == 0)
3588 sc->bge_timer = 0;
3589}
3590
3591#ifdef DEVICE_POLLING
3592static int
3593bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3594{
3595 struct bge_softc *sc = ifp->if_softc;
3596 uint16_t rx_prod, tx_cons;
3597 uint32_t statusword;
3598 int rx_npkts = 0;
3599
3600 BGE_LOCK(sc);
3601 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3602 BGE_UNLOCK(sc);
3603 return (rx_npkts);
3604 }
3605
3606 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3607 sc->bge_cdata.bge_status_map,
3608 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3609 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3610 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3611
3612 statusword = sc->bge_ldata.bge_status_block->bge_status;
3613 sc->bge_ldata.bge_status_block->bge_status = 0;
3614
3615 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3616 sc->bge_cdata.bge_status_map,
3617 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3618
3619 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3620 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3621 sc->bge_link_evt++;
3622
3623 if (cmd == POLL_AND_CHECK_STATUS)
3624 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3625 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3626 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3627 bge_link_upd(sc);
3628
3629 sc->rxcycles = count;
3630 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3631 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3632 BGE_UNLOCK(sc);
3633 return (rx_npkts);
3634 }
3635 bge_txeof(sc, tx_cons);
3636 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3637 bge_start_locked(ifp);
3638
3639 BGE_UNLOCK(sc);
3640 return (rx_npkts);
3641}
3642#endif /* DEVICE_POLLING */
3643
3644static int
3645bge_msi_intr(void *arg)
3646{
3647 struct bge_softc *sc;
3648
3649 sc = (struct bge_softc *)arg;
3650 /*
3651	 * This interrupt is not shared and the controller has already
3652	 * disabled further interrupts.
3653 */
3654 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3655 return (FILTER_HANDLED);
3656}
3657
3658static void
3659bge_intr_task(void *arg, int pending)
3660{
3661 struct bge_softc *sc;
3662 struct ifnet *ifp;
3663 uint32_t status;
3664 uint16_t rx_prod, tx_cons;
3665
3666 sc = (struct bge_softc *)arg;
3667 ifp = sc->bge_ifp;
3668
3669 BGE_LOCK(sc);
3670 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3671 BGE_UNLOCK(sc);
3672 return;
3673 }
3674
3675 /* Get updated status block. */
3676 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3677 sc->bge_cdata.bge_status_map,
3678 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3679
3680	/* Save producer/consumer indexes. */
3681 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3682 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3683 status = sc->bge_ldata.bge_status_block->bge_status;
3684 sc->bge_ldata.bge_status_block->bge_status = 0;
3685 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3686 sc->bge_cdata.bge_status_map,
3687 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3688
3689 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
3690 bge_link_upd(sc);
3691
3692 /* Let controller work. */
3693 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3694
3695 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3696 sc->bge_rx_saved_considx != rx_prod) {
3697 /* Check RX return ring producer/consumer. */
3698 BGE_UNLOCK(sc);
3699 bge_rxeof(sc, rx_prod, 0);
3700 BGE_LOCK(sc);
3701 }
3702 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3703 /* Check TX ring producer/consumer. */
3704 bge_txeof(sc, tx_cons);
3705 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3706 bge_start_locked(ifp);
3707 }
3708 BGE_UNLOCK(sc);
3709}
3710
3711static void
3712bge_intr(void *xsc)
3713{
3714 struct bge_softc *sc;
3715 struct ifnet *ifp;
3716 uint32_t statusword;
3717 uint16_t rx_prod, tx_cons;
3718
3719 sc = xsc;
3720
3721 BGE_LOCK(sc);
3722
3723 ifp = sc->bge_ifp;
3724
3725#ifdef DEVICE_POLLING
3726 if (ifp->if_capenable & IFCAP_POLLING) {
3727 BGE_UNLOCK(sc);
3728 return;
3729 }
3730#endif
3731
3732 /*
3733 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3734 * disable interrupts by writing nonzero like we used to, since with
3735 * our current organization this just gives complications and
3736 * pessimizations for re-enabling interrupts. We used to have races
3737 * instead of the necessary complications. Disabling interrupts
3738 * would just reduce the chance of a status update while we are
3739 * running (by switching to the interrupt-mode coalescence
3740 * parameters), but this chance is already very low so it is more
3741 * efficient to get another interrupt than prevent it.
3742 *
3743 * We do the ack first to ensure another interrupt if there is a
3744 * status update after the ack. We don't check for the status
3745 * changing later because it is more efficient to get another
3746 * interrupt than prevent it, not quite as above (not checking is
3747 * a smaller optimization than not toggling the interrupt enable,
3748	 * since checking doesn't involve PCI accesses and toggling requires
3749 * the status check). So toggling would probably be a pessimization
3750 * even with MSI. It would only be needed for using a task queue.
3751 */
3752 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3753
3754 /*
3755 * Do the mandatory PCI flush as well as get the link status.
3756 */
3757 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3758
3759 /* Make sure the descriptor ring indexes are coherent. */
3760 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3761 sc->bge_cdata.bge_status_map,
3762 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3763 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3764 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3765 sc->bge_ldata.bge_status_block->bge_status = 0;
3766 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3767 sc->bge_cdata.bge_status_map,
3768 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3769
3770 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3771 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3772 statusword || sc->bge_link_evt)
3773 bge_link_upd(sc);
3774
3775 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3776 /* Check RX return ring producer/consumer. */
3777 bge_rxeof(sc, rx_prod, 1);
3778 }
3779
3780 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3781 /* Check TX ring producer/consumer. */
3782 bge_txeof(sc, tx_cons);
3783 }
3784
3785 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3786 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3787 bge_start_locked(ifp);
3788
3789 BGE_UNLOCK(sc);
3790}
3791
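/*
 * Periodic ASF/IPMI keep-alive, called from bge_tick().  When the
 * countdown expires it writes a DRV_ALIVE command, a length word and a
 * data word of 3 (presumably the heartbeat timeout in seconds) into the
 * firmware mailbox region, then raises a CPU event to notify the
 * firmware of the update.
 */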
3792static void
3793bge_asf_driver_up(struct bge_softc *sc)
3794{
3795 if (sc->bge_asf_mode & ASF_STACKUP) {
3796		/* Send ASF heartbeat approx. every 2s */
3797 if (sc->bge_asf_count)
3798 sc->bge_asf_count --;
3799 else {
3800 sc->bge_asf_count = 2;
3801 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3802 BGE_FW_DRV_ALIVE);
3803 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3804 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3805 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3806 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3807 }
3808 }
3809}
3810
3811static void
3812bge_tick(void *xsc)
3813{
3814 struct bge_softc *sc = xsc;
3815 struct mii_data *mii = NULL;
3816
3817 BGE_LOCK_ASSERT(sc);
3818
3819 /* Synchronize with possible callout reset/stop. */
3820 if (callout_pending(&sc->bge_stat_ch) ||
3821 !callout_active(&sc->bge_stat_ch))
3822 return;
3823
3824 if (BGE_IS_5705_PLUS(sc))
3825 bge_stats_update_regs(sc);
3826 else
3827 bge_stats_update(sc);
3828
3829 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3830 mii = device_get_softc(sc->bge_miibus);
3831 /*
3832		 * Do not touch the PHY if we have link up. This could break
3833		 * IPMI/ASF mode or produce extra input errors
3834		 * (extra errors were reported for bcm5701 & bcm5704).
3835 */
3836 if (!sc->bge_link)
3837 mii_tick(mii);
3838 } else {
3839 /*
3840		 * Since auto-polling can't be used in TBI mode, we poll the
3841		 * link status manually. Here we register a pending link event
3842		 * and trigger an interrupt.
3843 */
3844#ifdef DEVICE_POLLING
3845 /* In polling mode we poll link state in bge_poll(). */
3846 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3847#endif
3848 {
3849 sc->bge_link_evt++;
3850 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3851 sc->bge_flags & BGE_FLAG_5788)
3852 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3853 else
3854 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3855 }
3856 }
3857
3858 bge_asf_driver_up(sc);
3859 bge_watchdog(sc);
3860
3861 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3862}
3863
3864static void
3865bge_stats_update_regs(struct bge_softc *sc)
3866{
3867 struct ifnet *ifp;
3868 struct bge_mac_stats *stats;
3869
3870 ifp = sc->bge_ifp;
3871 stats = &sc->bge_mac_stats;
3872
3873 stats->ifHCOutOctets +=
3874 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
3875 stats->etherStatsCollisions +=
3876 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
3877 stats->outXonSent +=
3878 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
3879 stats->outXoffSent +=
3880 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
3881 stats->dot3StatsInternalMacTransmitErrors +=
3882 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
3883 stats->dot3StatsSingleCollisionFrames +=
3884 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
3885 stats->dot3StatsMultipleCollisionFrames +=
3886 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
3887 stats->dot3StatsDeferredTransmissions +=
3888 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
3889 stats->dot3StatsExcessiveCollisions +=
3890 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
3891 stats->dot3StatsLateCollisions +=
3892 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
3893 stats->ifHCOutUcastPkts +=
3894 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
3895 stats->ifHCOutMulticastPkts +=
3896 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
3897 stats->ifHCOutBroadcastPkts +=
3898 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
3899
3900 stats->ifHCInOctets +=
3901 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
3902 stats->etherStatsFragments +=
3903 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
3904 stats->ifHCInUcastPkts +=
3905 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
3906 stats->ifHCInMulticastPkts +=
3907 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
3908 stats->ifHCInBroadcastPkts +=
3909 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
3910 stats->dot3StatsFCSErrors +=
3911 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
3912 stats->dot3StatsAlignmentErrors +=
3913 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
3914 stats->xonPauseFramesReceived +=
3915 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
3916 stats->xoffPauseFramesReceived +=
3917 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
3918 stats->macControlFramesReceived +=
3919 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
3920 stats->xoffStateEntered +=
3921 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
3922 stats->dot3StatsFramesTooLong +=
3923 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
3924 stats->etherStatsJabbers +=
3925 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
3926 stats->etherStatsUndersizePkts +=
3927 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
3928
3929 stats->FramesDroppedDueToFilters +=
3930 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
3931 stats->DmaWriteQueueFull +=
3932 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
3933 stats->DmaWriteHighPriQueueFull +=
3934 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
3935 stats->NoMoreRxBDs +=
3936 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3937 stats->InputDiscards +=
3938 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3939 stats->InputErrors +=
3940 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3941 stats->RecvThresholdHit +=
3942 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
3943
3944 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
3945 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
3946 stats->InputErrors);
3947}
3948
3949static void
3950bge_stats_clear_regs(struct bge_softc *sc)
3951{
3952
3953 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
3954 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
3955 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
3956 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
3957 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
3958 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
3959 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
3960 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
3961 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
3962 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
3963 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
3964 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
3965 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
3966
3967 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
3968 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
3969 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
3970 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
3971 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
3972 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
3973 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
3974 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
3975 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
3976 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
3977 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
3978 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
3979 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
3980 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
3981
3982 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
3983 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
3984 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
3985 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3986 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3987 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3988 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
3989}
3990
3991static void
3992bge_stats_update(struct bge_softc *sc)
3993{
3994 struct ifnet *ifp;
3995 bus_size_t stats;
3996 uint32_t cnt; /* current register value */
3997
3998 ifp = sc->bge_ifp;
3999
4000 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4001
4002#define READ_STAT(sc, stats, stat) \
4003 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4004
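	/*
	 * On the older controllers the statistics block lives in NIC-local
	 * memory at BGE_MEMWIN_START + BGE_STATS_BLOCK; READ_STAT() fetches
	 * the low 32 bits of a counter by its offset within struct bge_stats.
	 */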
4005 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4006 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4007 sc->bge_tx_collisions = cnt;
4008
4009 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4010 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4011 sc->bge_rx_discards = cnt;
4012
4013 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4014 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4015 sc->bge_tx_discards = cnt;
4016
4017#undef READ_STAT
4018}
4019
4020/*
4021 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4022 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4023 * but when such padded frames employ the bge IP/TCP checksum offload,
4024 * the hardware checksum assist gives incorrect results (possibly
4025 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4026 * If we pad such runts with zeros, the onboard checksum comes out correct.
4027 */
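/*
 * For example (assuming the stock value ETHER_MIN_NOPAD == 60), a 42-byte
 * frame gets 18 bytes of zeros appended, so the hardware has none of its
 * own padding to fold into the TCP/UDP checksum.
 */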
4028static __inline int
4029bge_cksum_pad(struct mbuf *m)
4030{
4031 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4032 struct mbuf *last;
4033
4034 /* If there's only the packet-header and we can pad there, use it. */
4035 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4036 M_TRAILINGSPACE(m) >= padlen) {
4037 last = m;
4038 } else {
4039 /*
4040 * Walk packet chain to find last mbuf. We will either
4041 * pad there, or append a new mbuf and pad it.
4042 */
4043 for (last = m; last->m_next != NULL; last = last->m_next);
4044 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4045 /* Allocate new empty mbuf, pad it. Compact later. */
4046 struct mbuf *n;
4047
4048 MGET(n, M_DONTWAIT, MT_DATA);
4049 if (n == NULL)
4050 return (ENOBUFS);
4051 n->m_len = 0;
4052 last->m_next = n;
4053 last = n;
4054 }
4055 }
4056
4057 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4058 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4059 last->m_len += padlen;
4060 m->m_pkthdr.len += padlen;
4061
4062 return (0);
4063}
4064
4065static struct mbuf *
4066bge_check_short_dma(struct mbuf *m)
4067{
4068 struct mbuf *n;
4069 int found;
4070
4071 /*
4072	 * If the device receives two back-to-back send BDs with less than
4073	 * or equal to 8 total bytes then the device may hang. The two
4074	 * back-to-back send BDs must be in the same frame for this failure
4075	 * to occur. Scan the mbuf chain and see whether two such back-to-back
4076	 * send BDs are there. If this is the case, allocate a new mbuf
4077	 * and copy the frame to work around the silicon bug.
4078 */
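	/*
	 * For example, a chain containing a 4-byte mbuf immediately
	 * followed by a 3-byte mbuf trips the check below; m_defrag()
	 * then compacts the frame into as few mbufs as possible.
	 */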
4079 for (n = m, found = 0; n != NULL; n = n->m_next) {
4080 if (n->m_len < 8) {
4081 found++;
4082 if (found > 1)
4083 break;
4084 continue;
4085 }
4086 found = 0;
4087 }
4088
4089 if (found > 1) {
4090 n = m_defrag(m, M_DONTWAIT);
4091 if (n == NULL)
4092 m_freem(m);
4093 } else
4094 n = m;
4095 return (n);
4096}
4097
4098static struct mbuf *
4099bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss)
4100{
4101 struct ip *ip;
4102 struct tcphdr *tcp;
4103 struct mbuf *n;
4104 uint16_t hlen;
4105 uint32_t poff;
4106
4107 if (M_WRITABLE(m) == 0) {
4108 /* Get a writable copy. */
4109 n = m_dup(m, M_DONTWAIT);
4110 m_freem(m);
4111 if (n == NULL)
4112 return (NULL);
4113 m = n;
4114 }
4115 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4116 if (m == NULL)
4117 return (NULL);
4118 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4119 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4120 m = m_pullup(m, poff + sizeof(struct tcphdr));
4121 if (m == NULL)
4122 return (NULL);
4123 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4124 m = m_pullup(m, poff + (tcp->th_off << 2));
4125 if (m == NULL)
4126 return (NULL);
4127 /*
4128	 * It seems the controller doesn't modify the IP length and TCP pseudo
4129	 * checksum. These checksums computed by the upper stack should be 0.
4130 */
4131 *mss = m->m_pkthdr.tso_segsz;
4132 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4133 ip->ip_sum = 0;
4134 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4135 /* Clear pseudo checksum computed by TCP stack. */
4136 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4137 tcp->th_sum = 0;
4138 /*
4139	 * Broadcom controllers use different descriptor formats for
4140	 * TSO depending on the ASIC revision. Due to the TSO-capable
4141	 * firmware license issue and the lower performance of firmware
4142	 * based TSO we only support hardware based TSO, which is available
4143	 * on BCM5755 or newer controllers. Hardware based TSO uses the
4144	 * lower 11 bits to store the MSS and the upper 5 bits to store the
4145	 * IP/TCP header length (including IP/TCP options). The header
4146	 * length is expressed in 32-bit units.
4147 */
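	/*
	 * For example, with a 20-byte IP header and a 20-byte TCP header
	 * (no options) hlen below is 40 >> 2 == 10, so the value handed
	 * to the controller is mss | (10 << 11).
	 */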
4148 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4149 *mss |= (hlen << 11);
4150 return (m);
4151}
4152
4153/*
4154 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4155 * pointers to descriptors.
4156 */
4157static int
4158bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4159{
4160 bus_dma_segment_t segs[BGE_NSEG_NEW];
4161 bus_dmamap_t map;
4162 struct bge_tx_bd *d;
4163 struct mbuf *m = *m_head;
4164 uint32_t idx = *txidx;
4165 uint16_t csum_flags, mss, vlan_tag;
4166 int nsegs, i, error;
4167
4168 csum_flags = 0;
4169 mss = 0;
4170 vlan_tag = 0;
4171 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4172 m->m_next != NULL) {
4173 *m_head = bge_check_short_dma(m);
4174 if (*m_head == NULL)
4175 return (ENOBUFS);
4176 m = *m_head;
4177 }
4178 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4179 *m_head = m = bge_setup_tso(sc, m, &mss);
4180 if (*m_head == NULL)
4181 return (ENOBUFS);
4182 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4183 BGE_TXBDFLAG_CPU_POST_DMA;
4184 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4185 if (m->m_pkthdr.csum_flags & CSUM_IP)
4186 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4187 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4188 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4189 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4190 (error = bge_cksum_pad(m)) != 0) {
4191 m_freem(m);
4192 *m_head = NULL;
4193 return (error);
4194 }
4195 }
4196 if (m->m_flags & M_LASTFRAG)
4197 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4198 else if (m->m_flags & M_FRAG)
4199 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4200 }
4201
4202 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
4203 sc->bge_forced_collapse > 0 &&
4204 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4205 /*
4206		 * Forcibly collapse mbuf chains to overcome a hardware
4207		 * limitation which only supports a single outstanding
4208		 * DMA read operation.
4209 */
4210 if (sc->bge_forced_collapse == 1)
4211 m = m_defrag(m, M_DONTWAIT);
4212 else
4213 m = m_collapse(m, M_DONTWAIT, sc->bge_forced_collapse);
4214 if (m == NULL)
4215 m = *m_head;
4216 *m_head = m;
4217 }
4218
4219 map = sc->bge_cdata.bge_tx_dmamap[idx];
4220 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4221 &nsegs, BUS_DMA_NOWAIT);
4222 if (error == EFBIG) {
4223 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4224 if (m == NULL) {
4225 m_freem(*m_head);
4226 *m_head = NULL;
4227 return (ENOBUFS);
4228 }
4229 *m_head = m;
4230 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4231 m, segs, &nsegs, BUS_DMA_NOWAIT);
4232 if (error) {
4233 m_freem(m);
4234 *m_head = NULL;
4235 return (error);
4236 }
4237 } else if (error != 0)
4238 return (error);
4239
4240 /* Check if we have enough free send BDs. */
4241 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4242 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4243 return (ENOBUFS);
4244 }
4245
4246 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4247
4248#if __FreeBSD_version > 700022
4249 if (m->m_flags & M_VLANTAG) {
4250 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4251 vlan_tag = m->m_pkthdr.ether_vtag;
4252 }
4253#else
4254 {
4255 struct m_tag *mtag;
4256
4257 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
4258 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4259 vlan_tag = VLAN_TAG_VALUE(mtag);
4260 }
4261 }
4262#endif
4263 for (i = 0; ; i++) {
4264 d = &sc->bge_ldata.bge_tx_ring[idx];
4265 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4266 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4267 d->bge_len = segs[i].ds_len;
4268 d->bge_flags = csum_flags;
4269 d->bge_vlan_tag = vlan_tag;
4270 d->bge_mss = mss;
4271 if (i == nsegs - 1)
4272 break;
4273 BGE_INC(idx, BGE_TX_RING_CNT);
4274 }
4275
4276 /* Mark the last segment as end of packet... */
4277 d->bge_flags |= BGE_TXBDFLAG_END;
4278
4279 /*
4280	 * Ensure that the map for this transmission
4281 * is placed at the array index of the last descriptor
4282 * in this chain.
4283 */
4284 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4285 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4286 sc->bge_cdata.bge_tx_chain[idx] = m;
4287 sc->bge_txcnt += nsegs;
4288
4289 BGE_INC(idx, BGE_TX_RING_CNT);
4290 *txidx = idx;
4291
4292 return (0);
4293}
4294
4295/*
4296 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4297 * to the mbuf data regions directly in the transmit descriptors.
4298 */
4299static void
4300bge_start_locked(struct ifnet *ifp)
4301{
4302 struct bge_softc *sc;
4303 struct mbuf *m_head;
4304 uint32_t prodidx;
4305 int count;
4306
4307 sc = ifp->if_softc;
4308 BGE_LOCK_ASSERT(sc);
4309
4310 if (!sc->bge_link ||
4311 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4312 IFF_DRV_RUNNING)
4313 return;
4314
4315 prodidx = sc->bge_tx_prodidx;
4316
4317 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4318 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4319 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4320 break;
4321 }
4322 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4323 if (m_head == NULL)
4324 break;
4325
4326 /*
4327 * XXX
4328 * The code inside the if() block is never reached since we
4329 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4330 * requests to checksum TCP/UDP in a fragmented packet.
4331 *
4332 * XXX
4333 * safety overkill. If this is a fragmented packet chain
4334 * with delayed TCP/UDP checksums, then only encapsulate
4335 * it if we have enough descriptors to handle the entire
4336 * chain at once.
4337 * (paranoia -- may not actually be needed)
4338 */
4339 if (m_head->m_flags & M_FIRSTFRAG &&
4340 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4341 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4342 m_head->m_pkthdr.csum_data + 16) {
4343 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4344 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4345 break;
4346 }
4347 }
4348
4349 /*
4350 * Pack the data into the transmit ring. If we
4351 * don't have room, set the OACTIVE flag and wait
4352 * for the NIC to drain the ring.
4353 */
4354 if (bge_encap(sc, &m_head, &prodidx)) {
4355 if (m_head == NULL)
4356 break;
4357 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4358 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4359 break;
4360 }
4361 ++count;
4362
4363 /*
4364 * If there's a BPF listener, bounce a copy of this frame
4365 * to him.
4366 */
4367#ifdef ETHER_BPF_MTAP
4368 ETHER_BPF_MTAP(ifp, m_head);
4369#else
4370 BPF_MTAP(ifp, m_head);
4371#endif
4372 }
4373
4374 if (count > 0) {
4375 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4376 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4377 /* Transmit. */
4378 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4379 /* 5700 b2 errata */
4380 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4381 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4382
4383 sc->bge_tx_prodidx = prodidx;
4384
4385 /*
4386 * Set a timeout in case the chip goes out to lunch.
4387 */
4388 sc->bge_timer = 5;
4389 }
4390}
4391
4392/*
4393 * Unlocked entry point for the main transmit routine: acquire the driver
4394 * lock and hand the work to bge_start_locked().
4395 */
4396static void
4397bge_start(struct ifnet *ifp)
4398{
4399 struct bge_softc *sc;
4400
4401 sc = ifp->if_softc;
4402 BGE_LOCK(sc);
4403 bge_start_locked(ifp);
4404 BGE_UNLOCK(sc);
4405}
4406
4407static void
4408bge_init_locked(struct bge_softc *sc)
4409{
4410 struct ifnet *ifp;
4411 uint16_t *m;
4412 uint32_t mode;
4413
4414 BGE_LOCK_ASSERT(sc);
4415
4416 ifp = sc->bge_ifp;
4417
4418 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4419 return;
4420
4421 /* Cancel pending I/O and flush buffers. */
4422 bge_stop(sc);
4423
4424 bge_stop_fw(sc);
4425 bge_sig_pre_reset(sc, BGE_RESET_START);
4426 bge_reset(sc);
4427 bge_sig_legacy(sc, BGE_RESET_START);
4428 bge_sig_post_reset(sc, BGE_RESET_START);
4429
4430 bge_chipinit(sc);
4431
4432 /*
4433 * Init the various state machines, ring
4434 * control blocks and firmware.
4435 */
4436 if (bge_blockinit(sc)) {
4437 device_printf(sc->bge_dev, "initialization failure\n");
4438 return;
4439 }
4440
4441 ifp = sc->bge_ifp;
4442
4443 /* Specify MTU. */
4444 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4445 ETHER_HDR_LEN + ETHER_CRC_LEN +
4446 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4447
4448 /* Load our MAC address. */
4449 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4450 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4451 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4452
4453 /* Program promiscuous mode. */
4454 bge_setpromisc(sc);
4455
4456 /* Program multicast filter. */
4457 bge_setmulti(sc);
4458
4459 /* Program VLAN tag stripping. */
4460 bge_setvlan(sc);
4461
4462 /* Override UDP checksum offloading. */
4463 if (sc->bge_forced_udpcsum == 0)
4464 sc->bge_csum_features &= ~CSUM_UDP;
4465 else
4466 sc->bge_csum_features |= CSUM_UDP;
4467 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4468 ifp->if_capenable & IFCAP_TXCSUM) {
4469 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4470 ifp->if_hwassist |= sc->bge_csum_features;
4471 }
4472
4473 /* Init RX ring. */
4474 if (bge_init_rx_ring_std(sc) != 0) {
4475 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4476 bge_stop(sc);
4477 return;
4478 }
4479
4480 /*
4481 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4482	 * memory to ensure that the chip has in fact read the first
4483 * entry of the ring.
4484 */
4485 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4486 uint32_t v, i;
4487 for (i = 0; i < 10; i++) {
4488 DELAY(20);
4489 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4490 if (v == (MCLBYTES - ETHER_ALIGN))
4491 break;
4492 }
4493 if (i == 10)
4494 device_printf (sc->bge_dev,
4495 "5705 A0 chip failed to load RX ring\n");
4496 }
4497
4498 /* Init jumbo RX ring. */
4499 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4500 (MCLBYTES - ETHER_ALIGN)) {
4501 if (bge_init_rx_ring_jumbo(sc) != 0) {
4502 device_printf(sc->bge_dev,
4503 "no memory for jumbo Rx buffers.\n");
4504 bge_stop(sc);
4505 return;
4506 }
4507 }
4508
4509 /* Init our RX return ring index. */
4510 sc->bge_rx_saved_considx = 0;
4511
4512 /* Init our RX/TX stat counters. */
4513 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4514
4515 /* Init TX ring. */
4516 bge_init_tx_ring(sc);
4517
4518 /* Enable TX MAC state machine lockup fix. */
4519 mode = CSR_READ_4(sc, BGE_TX_MODE);
4520 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4521 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4522 /* Turn on transmitter. */
4523 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4524
4525 /* Turn on receiver. */
4526 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4527
4528 /*
4529 * Set the number of good frames to receive after RX MBUF
4530 * Low Watermark has been reached. After the RX MAC receives
4531 * this number of frames, it will drop subsequent incoming
4532 * frames until the MBUF High Watermark is reached.
4533 */
4534 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4535
4536 /* Clear MAC statistics. */
4537 if (BGE_IS_5705_PLUS(sc))
4538 bge_stats_clear_regs(sc);
4539
4540 /* Tell firmware we're alive. */
4541 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4542
4543#ifdef DEVICE_POLLING
4544 /* Disable interrupts if we are polling. */
4545 if (ifp->if_capenable & IFCAP_POLLING) {
4546 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4547 BGE_PCIMISCCTL_MASK_PCI_INTR);
4548 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4549 } else
4550#endif
4551
4552 /* Enable host interrupts. */
4553 {
4554 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4555 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4556 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4557 }
4558
4559 bge_ifmedia_upd_locked(ifp);
4560
4561 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4562 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4563
4564 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4565}
4566
4567static void
4568bge_init(void *xsc)
4569{
4570 struct bge_softc *sc = xsc;
4571
4572 BGE_LOCK(sc);
4573 bge_init_locked(sc);
4574 BGE_UNLOCK(sc);
4575}
4576
4577/*
4578 * Set media options.
4579 */
4580static int
4581bge_ifmedia_upd(struct ifnet *ifp)
4582{
4583 struct bge_softc *sc = ifp->if_softc;
4584 int res;
4585
4586 BGE_LOCK(sc);
4587 res = bge_ifmedia_upd_locked(ifp);
4588 BGE_UNLOCK(sc);
4589
4590 return (res);
4591}
4592
4593static int
4594bge_ifmedia_upd_locked(struct ifnet *ifp)
4595{
4596 struct bge_softc *sc = ifp->if_softc;
4597 struct mii_data *mii;
4598 struct mii_softc *miisc;
4599 struct ifmedia *ifm;
4600
4601 BGE_LOCK_ASSERT(sc);
4602
4603 ifm = &sc->bge_ifmedia;
4604
4605 /* If this is a 1000baseX NIC, enable the TBI port. */
4606 if (sc->bge_flags & BGE_FLAG_TBI) {
4607 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4608 return (EINVAL);
4609 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4610 case IFM_AUTO:
4611 /*
4612 * The BCM5704 ASIC appears to have a special
4613 * mechanism for programming the autoneg
4614 * advertisement registers in TBI mode.
4615 */
4616 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4617 uint32_t sgdig;
4618 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4619 if (sgdig & BGE_SGDIGSTS_DONE) {
4620 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4621 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4622 sgdig |= BGE_SGDIGCFG_AUTO |
4623 BGE_SGDIGCFG_PAUSE_CAP |
4624 BGE_SGDIGCFG_ASYM_PAUSE;
4625 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4626 sgdig | BGE_SGDIGCFG_SEND);
4627 DELAY(5);
4628 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4629 }
4630 }
4631 break;
4632 case IFM_1000_SX:
4633 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4634 BGE_CLRBIT(sc, BGE_MAC_MODE,
4635 BGE_MACMODE_HALF_DUPLEX);
4636 } else {
4637 BGE_SETBIT(sc, BGE_MAC_MODE,
4638 BGE_MACMODE_HALF_DUPLEX);
4639 }
4640 break;
4641 default:
4642 return (EINVAL);
4643 }
4644 return (0);
4645 }
4646
4647 sc->bge_link_evt++;
4648 mii = device_get_softc(sc->bge_miibus);
4649 if (mii->mii_instance)
4650 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4651 mii_phy_reset(miisc);
4652 mii_mediachg(mii);
4653
4654 /*
4655 * Force an interrupt so that we will call bge_link_upd
4656 * if needed and clear any pending link state attention.
4657	 * Without this we do not get any further interrupts for
4658	 * link state changes and thus will not mark the link UP and
4659	 * will not be able to send in bge_start_locked. The only
4660	 * way to get things working was to receive a packet and
4661	 * get an RX interrupt.
4662	 * bge_tick should help for fiber cards and we might not
4663	 * need to do this here if BGE_FLAG_TBI is set, but since
4664	 * we poll for fiber anyway it should not hurt.
4665 */
4666 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4667 sc->bge_flags & BGE_FLAG_5788)
4668 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4669 else
4670 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4671
4672 return (0);
4673}
4674
4675/*
4676 * Report current media status.
4677 */
4678static void
4679bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4680{
4681 struct bge_softc *sc = ifp->if_softc;
4682 struct mii_data *mii;
4683
4684 BGE_LOCK(sc);
4685
4686 if (sc->bge_flags & BGE_FLAG_TBI) {
4687 ifmr->ifm_status = IFM_AVALID;
4688 ifmr->ifm_active = IFM_ETHER;
4689 if (CSR_READ_4(sc, BGE_MAC_STS) &
4690 BGE_MACSTAT_TBI_PCS_SYNCHED)
4691 ifmr->ifm_status |= IFM_ACTIVE;
4692 else {
4693 ifmr->ifm_active |= IFM_NONE;
4694 BGE_UNLOCK(sc);
4695 return;
4696 }
4697 ifmr->ifm_active |= IFM_1000_SX;
4698 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4699 ifmr->ifm_active |= IFM_HDX;
4700 else
4701 ifmr->ifm_active |= IFM_FDX;
4702 BGE_UNLOCK(sc);
4703 return;
4704 }
4705
4706 mii = device_get_softc(sc->bge_miibus);
4707 mii_pollstat(mii);
4708 ifmr->ifm_active = mii->mii_media_active;
4709 ifmr->ifm_status = mii->mii_media_status;
4710
4711 BGE_UNLOCK(sc);
4712}
4713
4714static int
4715bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4716{
4717 struct bge_softc *sc = ifp->if_softc;
4718 struct ifreq *ifr = (struct ifreq *) data;
4719 struct mii_data *mii;
4720 int flags, mask, error = 0;
4721
4722 switch (command) {
4723 case SIOCSIFMTU:
4724 BGE_LOCK(sc);
4725 if (ifr->ifr_mtu < ETHERMIN ||
4726 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4727 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4728 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4729 ifr->ifr_mtu > ETHERMTU))
4730 error = EINVAL;
4731 else if (ifp->if_mtu != ifr->ifr_mtu) {
4732 ifp->if_mtu = ifr->ifr_mtu;
4733 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4734 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4735 bge_init_locked(sc);
4736 }
4737 }
4738 BGE_UNLOCK(sc);
4739 break;
4740 case SIOCSIFFLAGS:
4741 BGE_LOCK(sc);
4742 if (ifp->if_flags & IFF_UP) {
4743 /*
4744 * If only the state of the PROMISC flag changed,
4745 * then just use the 'set promisc mode' command
4746 * instead of reinitializing the entire NIC. Doing
4747 * a full re-init means reloading the firmware and
4748 * waiting for it to start up, which may take a
4749 * second or two. Similarly for ALLMULTI.
4750 */
4751 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4752 flags = ifp->if_flags ^ sc->bge_if_flags;
4753 if (flags & IFF_PROMISC)
4754 bge_setpromisc(sc);
4755 if (flags & IFF_ALLMULTI)
4756 bge_setmulti(sc);
4757 } else
4758 bge_init_locked(sc);
4759 } else {
4760 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4761 bge_stop(sc);
4762 }
4763 }
4764 sc->bge_if_flags = ifp->if_flags;
4765 BGE_UNLOCK(sc);
4766 error = 0;
4767 break;
4768 case SIOCADDMULTI:
4769 case SIOCDELMULTI:
4770 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4771 BGE_LOCK(sc);
4772 bge_setmulti(sc);
4773 BGE_UNLOCK(sc);
4774 error = 0;
4775 }
4776 break;
4777 case SIOCSIFMEDIA:
4778 case SIOCGIFMEDIA:
4779 if (sc->bge_flags & BGE_FLAG_TBI) {
4780 error = ifmedia_ioctl(ifp, ifr,
4781 &sc->bge_ifmedia, command);
4782 } else {
4783 mii = device_get_softc(sc->bge_miibus);
4784 error = ifmedia_ioctl(ifp, ifr,
4785 &mii->mii_media, command);
4786 }
4787 break;
4788 case SIOCSIFCAP:
4789 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4790#ifdef DEVICE_POLLING
4791 if (mask & IFCAP_POLLING) {
4792 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4793 error = ether_poll_register(bge_poll, ifp);
4794 if (error)
4795 return (error);
4796 BGE_LOCK(sc);
4797 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4798 BGE_PCIMISCCTL_MASK_PCI_INTR);
4799 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4800 ifp->if_capenable |= IFCAP_POLLING;
4801 BGE_UNLOCK(sc);
4802 } else {
4803 error = ether_poll_deregister(ifp);
4804 /* Enable interrupt even in error case */
4805 BGE_LOCK(sc);
4806 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
4807 BGE_PCIMISCCTL_MASK_PCI_INTR);
4808 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4809 ifp->if_capenable &= ~IFCAP_POLLING;
4810 BGE_UNLOCK(sc);
4811 }
4812 }
4813#endif
4814 if ((mask & IFCAP_TXCSUM) != 0 &&
4815 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
4816 ifp->if_capenable ^= IFCAP_TXCSUM;
4817 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
4818 ifp->if_hwassist |= sc->bge_csum_features;
4819 else
4820 ifp->if_hwassist &= ~sc->bge_csum_features;
4821 }
4822
4823 if ((mask & IFCAP_RXCSUM) != 0 &&
4824 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
4825 ifp->if_capenable ^= IFCAP_RXCSUM;
4826
4827 if ((mask & IFCAP_TSO4) != 0 &&
4828 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
4829 ifp->if_capenable ^= IFCAP_TSO4;
4830 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
4831 ifp->if_hwassist |= CSUM_TSO;
4832 else
4833 ifp->if_hwassist &= ~CSUM_TSO;
4834 }
4835
4836 if (mask & IFCAP_VLAN_MTU) {
4837 ifp->if_capenable ^= IFCAP_VLAN_MTU;
4838 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4839 bge_init(sc);
4840 }
4841
4842 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
4843 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
4844 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4845 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
4846 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
4847 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4848 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
4849 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
4850 BGE_LOCK(sc);
4851 bge_setvlan(sc);
4852 BGE_UNLOCK(sc);
4853 }
4854#ifdef VLAN_CAPABILITIES
4855 VLAN_CAPABILITIES(ifp);
4856#endif
4857 break;
4858 default:
4859 error = ether_ioctl(ifp, command, data);
4860 break;
4861 }
4862
4863 return (error);
4864}
4865
4866static void
4867bge_watchdog(struct bge_softc *sc)
4868{
4869 struct ifnet *ifp;
4870
4871 BGE_LOCK_ASSERT(sc);
4872
4873 if (sc->bge_timer == 0 || --sc->bge_timer)
4874 return;
4875
4876 ifp = sc->bge_ifp;
4877
4878 if_printf(ifp, "watchdog timeout -- resetting\n");
4879
4880 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4881 bge_init_locked(sc);
4882
4883 ifp->if_oerrors++;
4884}
4885
4886/*
4887 * Stop the adapter and free any mbufs allocated to the
4888 * RX and TX lists.
4889 */
4890static void
4891bge_stop(struct bge_softc *sc)
4892{
4893 struct ifnet *ifp;
4894
4895 BGE_LOCK_ASSERT(sc);
4896
4897 ifp = sc->bge_ifp;
4898
4899 callout_stop(&sc->bge_stat_ch);
4900
4901 /* Disable host interrupts. */
4902 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4903 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4904
4905 /*
4906 * Tell firmware we're shutting down.
4907 */
4908 bge_stop_fw(sc);
4909 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4910
4911 /*
4912 * Disable all of the receiver blocks.
4913 */
4914 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4915 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4916 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4917 if (!(BGE_IS_5705_PLUS(sc)))
4918 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4919 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4920 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4921 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4922
4923 /*
4924 * Disable all of the transmit blocks.
4925 */
4926 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4927 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4928 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4929 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4930 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4931 if (!(BGE_IS_5705_PLUS(sc)))
4932 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4933 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4934
4935 /*
4936 * Shut down all of the memory managers and related
4937 * state machines.
4938 */
4939 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4940 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4941 if (!(BGE_IS_5705_PLUS(sc)))
4942 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4943 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4944 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4945 if (!(BGE_IS_5705_PLUS(sc))) {
4946 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4947 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4948 }
4949 /* Update MAC statistics. */
4950 if (BGE_IS_5705_PLUS(sc))
4951 bge_stats_update_regs(sc);
4952
4953 bge_reset(sc);
4954 bge_sig_legacy(sc, BGE_RESET_STOP);
4955 bge_sig_post_reset(sc, BGE_RESET_STOP);
4956
4957 /*
4958 * Keep the ASF firmware running if up.
4959 */
4960 if (sc->bge_asf_mode & ASF_STACKUP)
4961 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4962 else
4963 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4964
4965 /* Free the RX lists. */
4966 bge_free_rx_ring_std(sc);
4967
4968 /* Free jumbo RX list. */
4969 if (BGE_IS_JUMBO_CAPABLE(sc))
4970 bge_free_rx_ring_jumbo(sc);
4971
4972 /* Free TX buffers. */
4973 bge_free_tx_ring(sc);
4974
4975 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4976
4977 /* Clear MAC's link state (PHY may still have link UP). */
4978 if (bootverbose && sc->bge_link)
4979 if_printf(sc->bge_ifp, "link DOWN\n");
4980 sc->bge_link = 0;
4981
4982 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4983}
4984
4985/*
4986 * Stop all chip I/O so that the kernel's probe routines don't
4987 * get confused by errant DMAs when rebooting.
4988 */
4989static int
4990bge_shutdown(device_t dev)
4991{
4992 struct bge_softc *sc;
4993
4994 sc = device_get_softc(dev);
4995 BGE_LOCK(sc);
4996 bge_stop(sc);
4997 bge_reset(sc);
4998 BGE_UNLOCK(sc);
4999
5000 return (0);
5001}
5002
5003static int
5004bge_suspend(device_t dev)
5005{
5006 struct bge_softc *sc;
5007
5008 sc = device_get_softc(dev);
5009 BGE_LOCK(sc);
5010 bge_stop(sc);
5011 BGE_UNLOCK(sc);
5012
5013 return (0);
5014}
5015
5016static int
5017bge_resume(device_t dev)
5018{
5019 struct bge_softc *sc;
5020 struct ifnet *ifp;
5021
5022 sc = device_get_softc(dev);
5023 BGE_LOCK(sc);
5024 ifp = sc->bge_ifp;
5025 if (ifp->if_flags & IFF_UP) {
5026 bge_init_locked(sc);
5027 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5028 bge_start_locked(ifp);
5029 }
5030 BGE_UNLOCK(sc);
5031
5032 return (0);
5033}
5034
5035static void
5036bge_link_upd(struct bge_softc *sc)
5037{
5038 struct mii_data *mii;
5039 uint32_t link, status;
5040
5041 BGE_LOCK_ASSERT(sc);
5042
5043 /* Clear 'pending link event' flag. */
5044 sc->bge_link_evt = 0;
5045
5046 /*
5047 * Process link state changes.
5048 * Grrr. The link status word in the status block does
5049 * not work correctly on the BCM5700 rev AX and BX chips,
5050 * according to all available information. Hence, we have
5051 * to enable MII interrupts in order to properly obtain
5052 * async link changes. Unfortunately, this also means that
5053 * we have to read the MAC status register to detect link
5054 * changes, thereby adding an additional register access to
5055 * the interrupt handler.
5056 *
5057 * XXX: perhaps link state detection procedure used for
5058	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5059 */
5060
5061 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5062 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5063 status = CSR_READ_4(sc, BGE_MAC_STS);
5064 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5065 mii = device_get_softc(sc->bge_miibus);
5066 mii_pollstat(mii);
5067 if (!sc->bge_link &&
5068 mii->mii_media_status & IFM_ACTIVE &&
5069 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5070 sc->bge_link++;
5071 if (bootverbose)
5072 if_printf(sc->bge_ifp, "link UP\n");
5073 } else if (sc->bge_link &&
5074 (!(mii->mii_media_status & IFM_ACTIVE) ||
5075 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5076 sc->bge_link = 0;
5077 if (bootverbose)
5078 if_printf(sc->bge_ifp, "link DOWN\n");
5079 }
5080
5081 /* Clear the interrupt. */
5082 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5083 BGE_EVTENB_MI_INTERRUPT);
5084 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5085 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5086 BRGPHY_INTRS);
5087 }
5088 return;
5089 }
5090
5091 if (sc->bge_flags & BGE_FLAG_TBI) {
5092 status = CSR_READ_4(sc, BGE_MAC_STS);
5093 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5094 if (!sc->bge_link) {
5095 sc->bge_link++;
5096 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5097 BGE_CLRBIT(sc, BGE_MAC_MODE,
5098 BGE_MACMODE_TBI_SEND_CFGS);
5099 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5100 if (bootverbose)
5101 if_printf(sc->bge_ifp, "link UP\n");
5102 if_link_state_change(sc->bge_ifp,
5103 LINK_STATE_UP);
5104 }
5105 } else if (sc->bge_link) {
5106 sc->bge_link = 0;
5107 if (bootverbose)
5108 if_printf(sc->bge_ifp, "link DOWN\n");
5109 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5110 }
5111 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5112 /*
5113		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
5114		 * bit in the status word always set. Work around this bug by
5115		 * reading the PHY link status directly.
5116 */
5117 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5118
5119 if (link != sc->bge_link ||
5120 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5121 mii = device_get_softc(sc->bge_miibus);
5122 mii_pollstat(mii);
5123 if (!sc->bge_link &&
5124 mii->mii_media_status & IFM_ACTIVE &&
5125 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5126 sc->bge_link++;
5127 if (bootverbose)
5128 if_printf(sc->bge_ifp, "link UP\n");
5129 } else if (sc->bge_link &&
5130 (!(mii->mii_media_status & IFM_ACTIVE) ||
5131 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5132 sc->bge_link = 0;
5133 if (bootverbose)
5134 if_printf(sc->bge_ifp, "link DOWN\n");
5135 }
5136 }
5137 } else {
5138 /*
5139 * For controllers that call mii_tick, we have to poll
5140 * link status.
5141 */
5142 mii = device_get_softc(sc->bge_miibus);
5143 mii_pollstat(mii);
5144 bge_miibus_statchg(sc->bge_dev);
5145 }
5146
5147 /* Clear the attention. */
5148 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5149 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5150 BGE_MACSTAT_LINK_CHANGED);
5151}
5152
5153static void
5154bge_add_sysctls(struct bge_softc *sc)
5155{
5156 struct sysctl_ctx_list *ctx;
5157 struct sysctl_oid_list *children;
5158 char tn[32];
5159 int unit;
5160
5161 ctx = device_get_sysctl_ctx(sc->bge_dev);
5162 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5163
5164#ifdef BGE_REGISTER_DEBUG
5165 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5166 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5167 "Debug Information");
5168
5169 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5170 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5171 "Register Read");
5172
5173 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5174 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5175 "Memory Read");
5176
5177#endif
5178
5179 unit = device_get_unit(sc->bge_dev);
5180 /*
5181 * A common design characteristic for many Broadcom client controllers
5182 * is that they only support a single outstanding DMA read operation
5183 * on the PCIe bus. This means that it will take twice as long to fetch
5184 * a TX frame that is split into header and payload buffers as it does
5185 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5186	 * these controllers, coalescing buffers to reduce the number of memory
5187	 * reads is an effective way to get maximum performance (about 940Mbps).
5188	 * Without collapsing TX buffers the maximum TCP bulk transfer
5189	 * performance is about 850Mbps. However, forcing mbuf coalescing
5190	 * consumes a lot of CPU cycles, so leave it off by default.
5191 */
5192 sc->bge_forced_collapse = 0;
5193 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5194 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5195 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5196 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5197 "Number of fragmented TX buffers of a frame allowed before "
5198 "forced collapsing");
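	/*
	 * Usage sketch: "sysctl dev.bge.0.forced_collapse=2" (for unit 0)
	 * makes bge_encap() call m_collapse() to squeeze multi-fragment,
	 * non-TSO frames on PCIe devices into at most two segments, while
	 * a value of 1 uses m_defrag() to linearize the chain completely.
	 */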
5199
5200 /*
5201 * It seems all Broadcom controllers have a bug that can generate UDP
5202 * datagrams with checksum value 0 when TX UDP checksum offloading is
5203	 * enabled. Generating a UDP checksum value of 0 is an RFC 768 violation.
5204	 * Even though the probability of generating such UDP datagrams is
5205	 * low, we don't want FreeBSD boxes to inject such datagrams into
5206	 * the network, so UDP checksum offloading is disabled by default.
5207	 * Users can still override this behavior by setting the sysctl
5208	 * variable dev.bge.0.forced_udpcsum.
5209 */
5210 sc->bge_forced_udpcsum = 0;
5211 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5212 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5213 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5214 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5215 "Enable UDP checksum offloading even if controller can "
5216 "generate UDP checksum value 0");
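	/*
	 * Usage sketch: "sysctl dev.bge.0.forced_udpcsum=1" (or the
	 * corresponding loader tunable fetched above) makes the next
	 * bge_init_locked() keep CSUM_UDP in bge_csum_features and thus
	 * in if_hwassist, re-enabling UDP transmit checksum offload.
	 */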
5217
5218 if (BGE_IS_5705_PLUS(sc))
5219 bge_add_sysctl_stats_regs(sc, ctx, children);
5220 else
5221 bge_add_sysctl_stats(sc, ctx, children);
5222}
5223
5224#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5225 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5226 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5227 desc)
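/*
 * Each BGE_SYSCTL_STAT() entry registers a read-only sysctl whose arg2 is
 * the offset of the counter inside struct bge_stats; the shared handler,
 * bge_sysctl_stats, presumably uses that offset to read the corresponding
 * counter from the chip's statistics block when the node is queried.
 */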
5228
5229static void
5230bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5231 struct sysctl_oid_list *parent)
5232{
5233 struct sysctl_oid *tree;
5234 struct sysctl_oid_list *children, *schildren;
5235
5236 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5237 NULL, "BGE Statistics");
5238 schildren = children = SYSCTL_CHILDREN(tree);
5239 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5240 children, COSFramesDroppedDueToFilters,
5241 "FramesDroppedDueToFilters");
5242 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5243 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5244 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5245 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5246 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5247 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5248 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5249 children, ifInDiscards, "InputDiscards");
5250 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5251 children, ifInErrors, "InputErrors");
5252 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5253 children, nicRecvThresholdHit, "RecvThresholdHit");
5254 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5255 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5256 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5257 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5258 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5259 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5260 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5261 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5262 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5263 children, nicRingStatusUpdate, "RingStatusUpdate");
5264 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5265 children, nicInterrupts, "Interrupts");
5266 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5267 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5268 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5269 children, nicSendThresholdHit, "SendThresholdHit");
5270
5271 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5272 NULL, "BGE RX Statistics");
5273 children = SYSCTL_CHILDREN(tree);
5274 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5275 children, rxstats.ifHCInOctets, "ifHCInOctets");
5276 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5277 children, rxstats.etherStatsFragments, "Fragments");
5278 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5279 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5280 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5281 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5282 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5283 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5284 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5285 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5286 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5287 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5288 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5289 children, rxstats.xoffPauseFramesReceived,
5290 "xoffPauseFramesReceived");
5291 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5292 children, rxstats.macControlFramesReceived,
5293 "ControlFramesReceived");
5294 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5295 children, rxstats.xoffStateEntered, "xoffStateEntered");
5296 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5297 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5298 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5299 children, rxstats.etherStatsJabbers, "Jabbers");
5300 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5301 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5302 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5303 children, rxstats.inRangeLengthError, "inRangeLengthError");
5304 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5305 children, rxstats.outRangeLengthError, "outRangeLengthError");
5306
5307 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5308 NULL, "BGE TX Statistics");
5309 children = SYSCTL_CHILDREN(tree);
5310 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5311 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5312 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5313 children, txstats.etherStatsCollisions, "Collisions");
5314 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5315 children, txstats.outXonSent, "XonSent");
5316 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5317 children, txstats.outXoffSent, "XoffSent");
5318 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5319 children, txstats.flowControlDone, "flowControlDone");
5320 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5321 children, txstats.dot3StatsInternalMacTransmitErrors,
5322 "InternalMacTransmitErrors");
5323 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5324 children, txstats.dot3StatsSingleCollisionFrames,
5325 "SingleCollisionFrames");
5326 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5327 children, txstats.dot3StatsMultipleCollisionFrames,
5328 "MultipleCollisionFrames");
5329 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5330 children, txstats.dot3StatsDeferredTransmissions,
5331 "DeferredTransmissions");
5332 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5333 children, txstats.dot3StatsExcessiveCollisions,
5334 "ExcessiveCollisions");
5335 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5336 children, txstats.dot3StatsLateCollisions,
5337 "LateCollisions");
5338 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5339 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5340 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5341 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5342 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5343 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5344 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5345 children, txstats.dot3StatsCarrierSenseErrors,
5346 "CarrierSenseErrors");
5347 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5348 children, txstats.ifOutDiscards, "Discards");
5349 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5350 children, txstats.ifOutErrors, "Errors");
5351}
5352
5353#undef BGE_SYSCTL_STAT
5354
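/*
 * On 5705 and newer controllers the per-chip MAC statistics are kept in
 * the softc (bge_mac_stats), so each counter can be exported directly as
 * a read-only 64-bit sysctl instead of being fetched from NIC memory.
 */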
5355#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5356 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5357
5358static void
5359bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5360 struct sysctl_oid_list *parent)
5361{
5362 struct sysctl_oid *tree;
5363 struct sysctl_oid_list *child, *schild;
5364 struct bge_mac_stats *stats;
5365
5366 stats = &sc->bge_mac_stats;
5367 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5368 NULL, "BGE Statistics");
5369 schild = child = SYSCTL_CHILDREN(tree);
5370 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5371 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5372 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5373 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5374 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5375 &stats->DmaWriteHighPriQueueFull,
5376 "NIC DMA Write High Priority Queue Full");
5377 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5378 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5379 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5380 &stats->InputDiscards, "Discarded Input Frames");
5381 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5382 &stats->InputErrors, "Input Errors");
5383 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5384 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5385
5386 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5387 NULL, "BGE RX Statistics");
5388 child = SYSCTL_CHILDREN(tree);
5389 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5390 &stats->ifHCInOctets, "Inbound Octets");
5391 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5392 &stats->etherStatsFragments, "Fragments");
5393 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5394 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5395 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5396 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5397 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5398 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5399 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5400 &stats->dot3StatsFCSErrors, "FCS Errors");
5401 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5402 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5403 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5404 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5405 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5406 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5407 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5408 &stats->macControlFramesReceived, "MAC Control Frames Received");
5409 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5410 &stats->xoffStateEntered, "XOFF State Entered");
5411 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5412 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5413 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5414 &stats->etherStatsJabbers, "Jabbers");
5415 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5416 &stats->etherStatsUndersizePkts, "Undersized Packets");
5417
5418 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5419 NULL, "BGE TX Statistics");
5420 child = SYSCTL_CHILDREN(tree);
5421 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5422 &stats->ifHCOutOctets, "Outbound Octets");
5423 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5424 &stats->etherStatsCollisions, "TX Collisions");
5425 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5426 &stats->outXonSent, "XON Sent");
5427 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5428 &stats->outXoffSent, "XOFF Sent");
5429 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5430 &stats->dot3StatsInternalMacTransmitErrors,
5431 "Internal MAC TX Errors");
5432 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5433 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5434 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5435 &stats->dot3StatsMultipleCollisionFrames,
5436 "Multiple Collision Frames");
5437 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5438 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5439 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5440 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5441 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5442 &stats->dot3StatsLateCollisions, "Late Collisions");
5443 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5444 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5445 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5446 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5447 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5448 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5449}
5450
5451#undef BGE_SYSCTL_STAT_ADD64
5452
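/*
 * Sysctl handler used for the pre-5705 statistics nodes: each request
 * reads a 32-bit snapshot of the requested counter from the NIC-local
 * statistics block through the PCI memory window, using the offset
 * passed in arg2.
 */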
5453static int
5454bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5455{
5456 struct bge_softc *sc;
5457 uint32_t result;
5458 int offset;
5459
5460 sc = (struct bge_softc *)arg1;
5461 offset = arg2;
5462 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5463 offsetof(bge_hostaddr, bge_addr_lo));
5464 return (sysctl_handle_int(oidp, &result, 0, req));
5465}
5466
5467#ifdef BGE_REGISTER_DEBUG
5468static int
5469bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5470{
5471 struct bge_softc *sc;
5472 uint16_t *sbdata;
5473 int error;
5474 int result;
5475 int i, j;
5476
5477 result = -1;
5478 error = sysctl_handle_int(oidp, &result, 0, req);
5479 if (error || (req->newptr == NULL))
5480 return (error);
5481
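	/*
	 * Writing 1 to the sysctl triggers the dump below: the status
	 * block, a window of device registers and the hardware
	 * capability flags are printed to the console.
	 */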
5482 if (result == 1) {
5483 sc = (struct bge_softc *)arg1;
5484
5485 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5486 printf("Status Block:\n");
5487 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
5488 printf("%06x:", i);
5489 for (j = 0; j < 8; j++) {
5490 printf(" %04x", sbdata[i]);
5491 i += 4;
5492 }
5493 printf("\n");
5494 }
5495
5496 printf("Registers:\n");
5497 for (i = 0x800; i < 0xA00; ) {
5498 printf("%06x:", i);
5499 for (j = 0; j < 8; j++) {
5500 printf(" %08x", CSR_READ_4(sc, i));
5501 i += 4;
5502 }
5503 printf("\n");
5504 }
5505
5506 printf("Hardware Flags:\n");
5507 if (BGE_IS_5755_PLUS(sc))
5508 printf(" - 5755 Plus\n");
5509 if (BGE_IS_575X_PLUS(sc))
5510 printf(" - 575X Plus\n");
5511 if (BGE_IS_5705_PLUS(sc))
5512 printf(" - 5705 Plus\n");
5513 if (BGE_IS_5714_FAMILY(sc))
5514 printf(" - 5714 Family\n");
5515 if (BGE_IS_5700_FAMILY(sc))
5516 printf(" - 5700 Family\n");
5517 if (sc->bge_flags & BGE_FLAG_JUMBO)
5518 printf(" - Supports Jumbo Frames\n");
5519 if (sc->bge_flags & BGE_FLAG_PCIX)
5520 printf(" - PCI-X Bus\n");
5521 if (sc->bge_flags & BGE_FLAG_PCIE)
5522 printf(" - PCI Express Bus\n");
5523 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5524 printf(" - No 3 LEDs\n");
5525 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5526 printf(" - RX Alignment Bug\n");
5527 }
5528
5529 return (error);
5530}
5531
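/*
 * Debug sysctl handlers: writing a register offset (below 0x8000) to the
 * reg_read node prints the 32-bit register value at that offset, and the
 * mem_read node does the same using an indirect memory-window read.
 */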
5532static int
5533bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5534{
5535 struct bge_softc *sc;
5536 int error;
5537 uint16_t result;
5538 uint32_t val;
5539
5540 result = -1;
5541 error = sysctl_handle_int(oidp, &result, 0, req);
5542 if (error || (req->newptr == NULL))
5543 return (error);
5544
5545 if (result < 0x8000) {
5546 sc = (struct bge_softc *)arg1;
5547 val = CSR_READ_4(sc, result);
5548 printf("reg 0x%06X = 0x%08X\n", result, val);
5549 }
5550
5551 return (error);
5552}
5553
5554static int
5555bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5556{
5557 struct bge_softc *sc;
5558 int error;
5559 uint16_t result;
5560 uint32_t val;
5561
5562 result = -1;
5563 error = sysctl_handle_int(oidp, &result, 0, req);
5564 if (error || (req->newptr == NULL))
5565 return (error);
5566
5567 if (result < 0x8000) {
5568 sc = (struct bge_softc *)arg1;
5569 val = bge_readmem_ind(sc, result);
5570 printf("mem 0x%06X = 0x%08X\n", result, val);
5571 }
5572
5573 return (error);
5574}
5575#endif
5576
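/*
 * The bge_get_eaddr_*() helpers below each try one possible source of the
 * station address and return zero on success or non-zero on failure;
 * bge_get_eaddr() walks them in order until one succeeds.
 */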
5577static int
5578bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5579{
5580
5581 if (sc->bge_flags & BGE_FLAG_EADDR)
5582 return (1);
5583
5584#ifdef __sparc64__
5585 OF_getetheraddr(sc->bge_dev, ether_addr);
5586 return (0);
5587#endif
5588 return (1);
5589}
5590
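/*
 * Read the station address from the NIC's shared memory. The 0x484b value
 * in the upper half of word 0x0c14 (ASCII "HK") appears to serve as a
 * signature indicating that a valid address follows.
 */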
5591static int
5592bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5593{
5594 uint32_t mac_addr;
5595
5596 mac_addr = bge_readmem_ind(sc, 0x0c14);
5597 if ((mac_addr >> 16) == 0x484b) {
5598 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5599 ether_addr[1] = (uint8_t)mac_addr;
5600 mac_addr = bge_readmem_ind(sc, 0x0c18);
5601 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5602 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5603 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5604 ether_addr[5] = (uint8_t)mac_addr;
5605 return (0);
5606 }
5607 return (1);
5608}
5609
5610static int
5611bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5612{
5613 int mac_offset = BGE_EE_MAC_OFFSET;
5614
5615 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5616 mac_offset = BGE_EE_MAC_OFFSET_5906;
5617
5618 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5619 ETHER_ADDR_LEN));
5620}
5621
5622static int
5623bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5624{
5625
5626 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5627 return (1);
5628
5629 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5630 ETHER_ADDR_LEN));
5631}
5632
5633static int
5634bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5635{
5636 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5637 /* NOTE: Order is critical */
5638 bge_get_eaddr_fw,
5639 bge_get_eaddr_mem,
5640 bge_get_eaddr_nvram,
5641 bge_get_eaddr_eeprom,
5642 NULL
5643 };
5644 const bge_eaddr_fcn_t *func;
5645
5646 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5647 if ((*func)(sc, eaddr) == 0)
5648 break;
5649 }
5650 return (*func == NULL ? ENXIO : 0);
5651}
1701 /*
1702 * The BD ring replenish thresholds control how often the
1703 * hardware fetches new BD's from the producer rings in host
1704 * memory. Setting the value too low on a busy system can
1705 * starve the hardware and reduce the throughput.
1706 *
1707 * Set the BD ring replenish thresholds. The recommended
1708 * values are 1/8th the number of descriptors allocated to
1709 * each ring.
1710 * XXX The 5754 requires a lower threshold, so it might be a
1711 * requirement of all 575x family chips. The Linux driver sets
1712 * the lower threshold for all 5705 family chips as well, but there
1713 * are reports that it might not need to be so strict.
1714 *
1715 * XXX Linux does some extra fiddling here for the 5906 parts as
1716 * well.
1717 */
1718 if (BGE_IS_5705_PLUS(sc))
1719 val = 8;
1720 else
1721 val = BGE_STD_RX_RING_CNT / 8;
1722 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1723 if (BGE_IS_JUMBO_CAPABLE(sc))
1724 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1725 BGE_JUMBO_RX_RING_CNT/8);
1726
1727 /*
1728 * Disable all send rings by setting the 'ring disabled' bit
1729 * in the flags field of all the TX send ring control blocks,
1730 * located in NIC memory.
1731 */
1732 if (!BGE_IS_5705_PLUS(sc))
1733 /* 5700 to 5704 had 16 send rings. */
1734 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1735 else
1736 limit = 1;
1737 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1738 for (i = 0; i < limit; i++) {
1739 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1740 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1741 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1742 vrcb += sizeof(struct bge_rcb);
1743 }
1744
1745 /* Configure send ring RCB 0 (we use only the first ring) */
1746 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1747 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1748 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1749 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1750 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1751 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1752 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1753 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1754
1755 /*
1756 * Disable all receive return rings by setting the
1757 * 'ring disabled' bit in the flags field of all the receive
1758 * return ring control blocks, located in NIC memory.
1759 */
1760 if (!BGE_IS_5705_PLUS(sc))
1761 limit = BGE_RX_RINGS_MAX;
1762 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
1763 limit = 4;
1764 else
1765 limit = 1;
1766 /* Disable all receive return rings. */
1767 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1768 for (i = 0; i < limit; i++) {
1769 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1770 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1771 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1772 BGE_RCB_FLAG_RING_DISABLED);
1773 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1774 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1775 (i * (sizeof(uint64_t))), 0);
1776 vrcb += sizeof(struct bge_rcb);
1777 }
1778
1779 /*
1780 * Set up receive return ring 0. Note that the NIC address
1781 * for RX return rings is 0x0. The return rings live entirely
1782 * within the host, so the nicaddr field in the RCB isn't used.
1783 */
1784 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1785 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1786 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1787 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1788 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1789 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1790 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1791
1792 /* Set random backoff seed for TX */
1793 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1794 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1795 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1796 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1797 BGE_TX_BACKOFF_SEED_MASK);
1798
1799 /* Set inter-packet gap */
1800 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1801
1802 /*
1803 * Specify which ring to use for packets that don't match
1804 * any RX rules.
1805 */
1806 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1807
1808 /*
1809 * Configure number of RX lists. One interrupt distribution
1810 * list, sixteen active lists, one bad frames class.
1811 */
1812 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1813
1814 /* Initialize RX list placement stats mask. */
1815 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1816 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1817
1818 /* Disable host coalescing until we get it set up */
1819 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1820
1821 /* Poll to make sure it's shut down. */
1822 for (i = 0; i < BGE_TIMEOUT; i++) {
1823 DELAY(10);
1824 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1825 break;
1826 }
1827
1828 if (i == BGE_TIMEOUT) {
1829 device_printf(sc->bge_dev,
1830 "host coalescing engine failed to idle\n");
1831 return (ENXIO);
1832 }
1833
1834 /* Set up host coalescing defaults */
1835 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1836 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1837 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1838 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1839 if (!(BGE_IS_5705_PLUS(sc))) {
1840 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1841 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1842 }
1843 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1844 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1845
1846 /* Set up address of statistics block */
1847 if (!(BGE_IS_5705_PLUS(sc))) {
1848 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1849 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1850 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1851 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1852 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1853 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1854 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1855 }
1856
1857 /* Set up address of status block */
1858 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1859 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1860 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1861 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1862
1863 /* Set up status block size. */
1864 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1865 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1866 val = BGE_STATBLKSZ_FULL;
1867 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1868 } else {
1869 val = BGE_STATBLKSZ_32BYTE;
1870 bzero(sc->bge_ldata.bge_status_block, 32);
1871 }
1872 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1873 sc->bge_cdata.bge_status_map,
1874 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1875
1876 /* Turn on host coalescing state machine */
1877 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1878
1879 /* Turn on RX BD completion state machine and enable attentions */
1880 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1881 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1882
1883 /* Turn on RX list placement state machine */
1884 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1885
1886 /* Turn on RX list selector state machine. */
1887 if (!(BGE_IS_5705_PLUS(sc)))
1888 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1889
1890 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1891 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1892 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1893 BGE_MACMODE_FRMHDR_DMA_ENB;
1894
1895 if (sc->bge_flags & BGE_FLAG_TBI)
1896 val |= BGE_PORTMODE_TBI;
1897 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1898 val |= BGE_PORTMODE_GMII;
1899 else
1900 val |= BGE_PORTMODE_MII;
1901
1902 /* Turn on DMA, clear stats */
1903 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1904
1905 /* Set misc. local control, enable interrupts on attentions */
1906 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1907
1908#ifdef notdef
1909 /* Assert GPIO pins for PHY reset */
1910 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1911 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1912 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1913 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1914#endif
1915
1916 /* Turn on DMA completion state machine */
1917 if (!(BGE_IS_5705_PLUS(sc)))
1918 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1919
1920 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1921
1922 /* Enable host coalescing bug fix. */
1923 if (BGE_IS_5755_PLUS(sc))
1924 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1925
1926 /* Request larger DMA burst size to get better performance. */
1927 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
1928 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1929
1930 /* Turn on write DMA state machine */
1931 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1932 DELAY(40);
1933
1934 /* Turn on read DMA state machine */
1935 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1936 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1937 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1938 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1939 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1940 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1941 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1942 if (sc->bge_flags & BGE_FLAG_PCIE)
1943 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1944 if (sc->bge_flags & BGE_FLAG_TSO) {
1945 val |= BGE_RDMAMODE_TSO4_ENABLE;
1946 if (sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1947 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1948 val |= BGE_RDMAMODE_TSO6_ENABLE;
1949 }
1950 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
1951 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1952 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1953 sc->bge_asicrev == BGE_ASICREV_BCM57780) {
1954 /*
1955 * Enable fix for read DMA FIFO overruns.
1956 * The fix is to limit the number of RX BDs
1957 * the hardware would fetch at a time.
1958 */
1959 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
1960 CSR_READ_4(sc, BGE_RDMA_RSRVCTRL) |
1961 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1962 }
1963 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1964 DELAY(40);
1965
1966 /* Turn on RX data completion state machine */
1967 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1968
1969 /* Turn on RX BD initiator state machine */
1970 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1971
1972 /* Turn on RX data and RX BD initiator state machine */
1973 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1974
1975 /* Turn on Mbuf cluster free state machine */
1976 if (!(BGE_IS_5705_PLUS(sc)))
1977 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1978
1979 /* Turn on send BD completion state machine */
1980 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1981
1982 /* Turn on send data completion state machine */
1983 val = BGE_SDCMODE_ENABLE;
1984 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
1985 val |= BGE_SDCMODE_CDELAY;
1986 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1987
1988 /* Turn on send data initiator state machine */
1989 if (sc->bge_flags & BGE_FLAG_TSO)
1990 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
1991 else
1992 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1993
1994 /* Turn on send BD initiator state machine */
1995 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1996
1997 /* Turn on send BD selector state machine */
1998 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1999
2000 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2001 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2002 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2003
2004 /* ack/clear link change events */
2005 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2006 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2007 BGE_MACSTAT_LINK_CHANGED);
2008 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2009
2010 /*
2011 * Enable attention when the link has changed state for
2012 * devices that use auto polling.
2013 */
2014 if (sc->bge_flags & BGE_FLAG_TBI) {
2015 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2016 } else {
2017 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2018 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2019 DELAY(80);
2020 }
2021 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2022 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2023 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2024 BGE_EVTENB_MI_INTERRUPT);
2025 }
2026
2027 /*
2028 * Clear any pending link state attention.
2029 * Otherwise some link state change events may be lost until attention
2030 * is cleared by the bge_intr() -> bge_link_upd() sequence.
2031 * This is not necessary on newer BCM chips; perhaps enabling link
2032 * state change attentions implies clearing pending attention.
2033 */
2034 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2035 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2036 BGE_MACSTAT_LINK_CHANGED);
2037
2038 /* Enable link state change attentions. */
2039 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2040
2041 return (0);
2042}
2043
2044const struct bge_revision *
2045bge_lookup_rev(uint32_t chipid)
2046{
2047 const struct bge_revision *br;
2048
2049 for (br = bge_revisions; br->br_name != NULL; br++) {
2050 if (br->br_chipid == chipid)
2051 return (br);
2052 }
2053
2054 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2055 if (br->br_chipid == BGE_ASICREV(chipid))
2056 return (br);
2057 }
2058
2059 return (NULL);
2060}
2061
2062const struct bge_vendor *
2063bge_lookup_vendor(uint16_t vid)
2064{
2065 const struct bge_vendor *v;
2066
2067 for (v = bge_vendors; v->v_name != NULL; v++)
2068 if (v->v_id == vid)
2069 return (v);
2070
2071 panic("%s: unknown vendor %d", __func__, vid);
2072 return (NULL);
2073}
2074
2075/*
2076 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2077 * against our list and return its name if we find a match.
2078 *
2079 * Note that since the Broadcom controller contains VPD support, we
2080 * try to get the device name string from the controller itself instead
2081 * of the compiled-in string. This guarantees we'll always announce the
2082 * right product name. We fall back to the compiled-in string when
2083 * VPD is unavailable or corrupt.
2084 */
2085static int
2086bge_probe(device_t dev)
2087{
2088 const struct bge_type *t = bge_devs;
2089 struct bge_softc *sc = device_get_softc(dev);
2090 uint16_t vid, did;
2091
2092 sc->bge_dev = dev;
2093 vid = pci_get_vendor(dev);
2094 did = pci_get_device(dev);
2095 while(t->bge_vid != 0) {
2096 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2097 char model[64], buf[96];
2098 const struct bge_revision *br;
2099 const struct bge_vendor *v;
2100 uint32_t id;
2101
2102 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2103 BGE_PCIMISCCTL_ASICREV_SHIFT;
2104 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG)
2105 id = pci_read_config(dev,
2106 BGE_PCI_PRODID_ASICREV, 4);
2107 br = bge_lookup_rev(id);
2108 v = bge_lookup_vendor(vid);
2109 {
2110#if __FreeBSD_version > 700024
2111 const char *pname;
2112
2113 if (bge_has_eaddr(sc) &&
2114 pci_get_vpd_ident(dev, &pname) == 0)
2115 snprintf(model, 64, "%s", pname);
2116 else
2117#endif
2118 snprintf(model, 64, "%s %s",
2119 v->v_name,
2120 br != NULL ? br->br_name :
2121 "NetXtreme Ethernet Controller");
2122 }
2123 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2124 br != NULL ? "" : "unknown ", id);
2125 device_set_desc_copy(dev, buf);
2126 return (0);
2127 }
2128 t++;
2129 }
2130
2131 return (ENXIO);
2132}
2133
2134static void
2135bge_dma_free(struct bge_softc *sc)
2136{
2137 int i;
2138
2139 /* Destroy DMA maps for RX buffers. */
2140 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2141 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2142 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2143 sc->bge_cdata.bge_rx_std_dmamap[i]);
2144 }
2145 if (sc->bge_cdata.bge_rx_std_sparemap)
2146 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2147 sc->bge_cdata.bge_rx_std_sparemap);
2148
2149 /* Destroy DMA maps for jumbo RX buffers. */
2150 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2151 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2152 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2153 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2154 }
2155 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2156 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2157 sc->bge_cdata.bge_rx_jumbo_sparemap);
2158
2159 /* Destroy DMA maps for TX buffers. */
2160 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2161 if (sc->bge_cdata.bge_tx_dmamap[i])
2162 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2163 sc->bge_cdata.bge_tx_dmamap[i]);
2164 }
2165
2166 if (sc->bge_cdata.bge_rx_mtag)
2167 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2168 if (sc->bge_cdata.bge_tx_mtag)
2169 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2170
2171
2172 /* Destroy standard RX ring. */
2173 if (sc->bge_cdata.bge_rx_std_ring_map)
2174 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2175 sc->bge_cdata.bge_rx_std_ring_map);
2176 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2177 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2178 sc->bge_ldata.bge_rx_std_ring,
2179 sc->bge_cdata.bge_rx_std_ring_map);
2180
2181 if (sc->bge_cdata.bge_rx_std_ring_tag)
2182 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2183
2184 /* Destroy jumbo RX ring. */
2185 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2186 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2187 sc->bge_cdata.bge_rx_jumbo_ring_map);
2188
2189 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2190 sc->bge_ldata.bge_rx_jumbo_ring)
2191 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2192 sc->bge_ldata.bge_rx_jumbo_ring,
2193 sc->bge_cdata.bge_rx_jumbo_ring_map);
2194
2195 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2196 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2197
2198 /* Destroy RX return ring. */
2199 if (sc->bge_cdata.bge_rx_return_ring_map)
2200 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2201 sc->bge_cdata.bge_rx_return_ring_map);
2202
2203 if (sc->bge_cdata.bge_rx_return_ring_map &&
2204 sc->bge_ldata.bge_rx_return_ring)
2205 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2206 sc->bge_ldata.bge_rx_return_ring,
2207 sc->bge_cdata.bge_rx_return_ring_map);
2208
2209 if (sc->bge_cdata.bge_rx_return_ring_tag)
2210 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2211
2212 /* Destroy TX ring. */
2213 if (sc->bge_cdata.bge_tx_ring_map)
2214 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2215 sc->bge_cdata.bge_tx_ring_map);
2216
2217 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2218 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2219 sc->bge_ldata.bge_tx_ring,
2220 sc->bge_cdata.bge_tx_ring_map);
2221
2222 if (sc->bge_cdata.bge_tx_ring_tag)
2223 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2224
2225 /* Destroy status block. */
2226 if (sc->bge_cdata.bge_status_map)
2227 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2228 sc->bge_cdata.bge_status_map);
2229
2230 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2231 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2232 sc->bge_ldata.bge_status_block,
2233 sc->bge_cdata.bge_status_map);
2234
2235 if (sc->bge_cdata.bge_status_tag)
2236 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2237
2238 /* Destroy statistics block. */
2239 if (sc->bge_cdata.bge_stats_map)
2240 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2241 sc->bge_cdata.bge_stats_map);
2242
2243 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2244 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2245 sc->bge_ldata.bge_stats,
2246 sc->bge_cdata.bge_stats_map);
2247
2248 if (sc->bge_cdata.bge_stats_tag)
2249 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2250
2251 if (sc->bge_cdata.bge_buffer_tag)
2252 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2253
2254 /* Destroy the parent tag. */
2255 if (sc->bge_cdata.bge_parent_tag)
2256 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2257}
2258
2259static int
2260bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2261 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2262 bus_addr_t *paddr, const char *msg)
2263{
2264 struct bge_dmamap_arg ctx;
2265 bus_addr_t lowaddr;
2266 bus_size_t ring_end;
2267 int error;
2268
2269 lowaddr = BUS_SPACE_MAXADDR;
2270again:
2271 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2272 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2273 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2274 if (error != 0) {
2275 device_printf(sc->bge_dev,
2276 "could not create %s dma tag\n", msg);
2277 return (ENOMEM);
2278 }
2279 /* Allocate DMA'able memory for ring. */
2280 error = bus_dmamem_alloc(*tag, (void **)ring,
2281 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2282 if (error != 0) {
2283 device_printf(sc->bge_dev,
2284 "could not allocate DMA'able memory for %s\n", msg);
2285 return (ENOMEM);
2286 }
2287 /* Load the address of the ring. */
2288 ctx.bge_busaddr = 0;
2289 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2290 &ctx, BUS_DMA_NOWAIT);
2291 if (error != 0) {
2292 device_printf(sc->bge_dev,
2293 "could not load DMA'able memory for %s\n", msg);
2294 return (ENOMEM);
2295 }
2296 *paddr = ctx.bge_busaddr;
2297 ring_end = *paddr + maxsize;
2298 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2299 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2300 /*
2301 * 4GB boundary crossed. Limit maximum allowable DMA
2302 * address space to 32bit and try again.
2303 */
2304 bus_dmamap_unload(*tag, *map);
2305 bus_dmamem_free(*tag, *ring, *map);
2306 bus_dma_tag_destroy(*tag);
2307 if (bootverbose)
2308 device_printf(sc->bge_dev, "4GB boundary crossed, "
2309 "limit DMA address space to 32bit for %s\n", msg);
2310 *ring = NULL;
2311 *tag = NULL;
2312 *map = NULL;
2313 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2314 goto again;
2315 }
2316 return (0);
2317}
2318
2319static int
2320bge_dma_alloc(struct bge_softc *sc)
2321{
2322 bus_addr_t lowaddr;
2323 bus_size_t boundary, sbsz, txsegsz, txmaxsegsz;
2324 int i, error;
2325
2326 lowaddr = BUS_SPACE_MAXADDR;
2327 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2328 lowaddr = BGE_DMA_MAXADDR;
2329 /*
2330 * Allocate the parent bus DMA tag appropriate for PCI.
2331 */
2332 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2333 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2334 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2335 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2336 if (error != 0) {
2337 device_printf(sc->bge_dev,
2338 "could not allocate parent dma tag\n");
2339 return (ENOMEM);
2340 }
2341
2342 /* Create tag for standard RX ring. */
2343 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2344 &sc->bge_cdata.bge_rx_std_ring_tag,
2345 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2346 &sc->bge_cdata.bge_rx_std_ring_map,
2347 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2348 if (error)
2349 return (error);
2350
2351 /* Create tag for RX return ring. */
2352 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2353 &sc->bge_cdata.bge_rx_return_ring_tag,
2354 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2355 &sc->bge_cdata.bge_rx_return_ring_map,
2356 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2357 if (error)
2358 return (error);
2359
2360 /* Create tag for TX ring. */
2361 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2362 &sc->bge_cdata.bge_tx_ring_tag,
2363 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2364 &sc->bge_cdata.bge_tx_ring_map,
2365 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2366 if (error)
2367 return (error);
2368
2369 /*
2370 * Create tag for status block.
2371 * Because we use only a single TX/RX/RX return ring, use the
2372 * minimum status block size, except on BCM5700 AX/BX, which
2373 * seems to want to see the full status block size regardless
2374 * of the configured number of rings.
2375 */
2376 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2377 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2378 sbsz = BGE_STATUS_BLK_SZ;
2379 else
2380 sbsz = 32;
2381 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2382 &sc->bge_cdata.bge_status_tag,
2383 (uint8_t **)&sc->bge_ldata.bge_status_block,
2384 &sc->bge_cdata.bge_status_map,
2385 &sc->bge_ldata.bge_status_block_paddr, "status block");
2386 if (error)
2387 return (error);
2388
2389 /* Create tag for statistics block. */
2390 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2391 &sc->bge_cdata.bge_stats_tag,
2392 (uint8_t **)&sc->bge_ldata.bge_stats,
2393 &sc->bge_cdata.bge_stats_map,
2394 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2395 if (error)
2396 return (error);
2397
2398 /* Create tag for jumbo RX ring. */
2399 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2400 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2401 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2402 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2403 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2404 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2405 if (error)
2406 return (error);
2407 }
2408
2409 /* Create parent tag for buffers. */
2410 boundary = 0;
2411 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0)
2412 boundary = BGE_DMA_BNDRY;
2413 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2414 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2415 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2416 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2417 if (error != 0) {
2418 device_printf(sc->bge_dev,
2419 "could not allocate buffer dma tag\n");
2420 return (ENOMEM);
2421 }
2422 /* Create tag for Tx mbufs. */
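	/*
	 * With TSO a single transmit may carry up to a 64KB TCP payload
	 * plus the Ethernet/VLAN header, so size the tag accordingly;
	 * otherwise mbuf-cluster-sized segments are sufficient.
	 */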
2423 if (sc->bge_flags & BGE_FLAG_TSO) {
2424 txsegsz = BGE_TSOSEG_SZ;
2425 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2426 } else {
2427 txsegsz = MCLBYTES;
2428 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2429 }
2430 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2431 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2432 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2433 &sc->bge_cdata.bge_tx_mtag);
2434
2435 if (error) {
2436 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2437 return (ENOMEM);
2438 }
2439
2440 /* Create tag for Rx mbufs. */
2441 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2442 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
2443 MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2444
2445 if (error) {
2446 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2447 return (ENOMEM);
2448 }
2449
2450 /* Create DMA maps for RX buffers. */
2451 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2452 &sc->bge_cdata.bge_rx_std_sparemap);
2453 if (error) {
2454 device_printf(sc->bge_dev,
2455 "can't create spare DMA map for RX\n");
2456 return (ENOMEM);
2457 }
2458 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2459 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2460 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2461 if (error) {
2462 device_printf(sc->bge_dev,
2463 "can't create DMA map for RX\n");
2464 return (ENOMEM);
2465 }
2466 }
2467
2468 /* Create DMA maps for TX buffers. */
2469 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2470 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2471 &sc->bge_cdata.bge_tx_dmamap[i]);
2472 if (error) {
2473 device_printf(sc->bge_dev,
2474 "can't create DMA map for TX\n");
2475 return (ENOMEM);
2476 }
2477 }
2478
2479 /* Create tags for jumbo RX buffers. */
2480 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2481 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2482 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2483 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2484 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2485 if (error) {
2486 device_printf(sc->bge_dev,
2487 "could not allocate jumbo dma tag\n");
2488 return (ENOMEM);
2489 }
2490 /* Create DMA maps for jumbo RX buffers. */
2491 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2492 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2493 if (error) {
2494 device_printf(sc->bge_dev,
2495 "can't create spare DMA map for jumbo RX\n");
2496 return (ENOMEM);
2497 }
2498 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2499 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2500 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2501 if (error) {
2502 device_printf(sc->bge_dev,
2503 "can't create DMA map for jumbo RX\n");
2504 return (ENOMEM);
2505 }
2506 }
2507 }
2508
2509 return (0);
2510}
2511
2512/*
2513 * Return true if this device has more than one port.
2514 */
2515static int
2516bge_has_multiple_ports(struct bge_softc *sc)
2517{
2518 device_t dev = sc->bge_dev;
2519 u_int b, d, f, fscan, s;
2520
2521 d = pci_get_domain(dev);
2522 b = pci_get_bus(dev);
2523 s = pci_get_slot(dev);
2524 f = pci_get_function(dev);
2525 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2526 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2527 return (1);
2528 return (0);
2529}
2530
2531/*
2532 * Return true if MSI can be used with this device.
2533 */
2534static int
2535bge_can_use_msi(struct bge_softc *sc)
2536{
2537 int can_use_msi = 0;
2538
2539 switch (sc->bge_asicrev) {
2540 case BGE_ASICREV_BCM5714_A0:
2541 case BGE_ASICREV_BCM5714:
2542 /*
2543 * Apparently, MSI doesn't work when these chips are
2544 * configured in single-port mode.
2545 */
2546 if (bge_has_multiple_ports(sc))
2547 can_use_msi = 1;
2548 break;
2549 case BGE_ASICREV_BCM5750:
2550 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2551 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2552 can_use_msi = 1;
2553 break;
2554 default:
2555 if (BGE_IS_575X_PLUS(sc))
2556 can_use_msi = 1;
2557 }
2558 return (can_use_msi);
2559}
2560
2561static int
2562bge_attach(device_t dev)
2563{
2564 struct ifnet *ifp;
2565 struct bge_softc *sc;
2566 uint32_t hwcfg = 0, misccfg;
2567 u_char eaddr[ETHER_ADDR_LEN];
2568 int error, msicount, phy_addr, reg, rid, trys;
2569
2570 sc = device_get_softc(dev);
2571 sc->bge_dev = dev;
2572
2573 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2574
2575 /*
2576 * Map control/status registers.
2577 */
2578 pci_enable_busmaster(dev);
2579
2580 rid = PCIR_BAR(0);
2581 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2582 RF_ACTIVE);
2583
2584 if (sc->bge_res == NULL) {
2585 device_printf (sc->bge_dev, "couldn't map memory\n");
2586 error = ENXIO;
2587 goto fail;
2588 }
2589
2590 /* Save various chip information. */
2591 sc->bge_chipid =
2592 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2593 BGE_PCIMISCCTL_ASICREV_SHIFT;
2594 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG)
2595 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV,
2596 4);
2597 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2598 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2599
2600 /* Set default PHY address. */
2601 phy_addr = 1;
2602
2603 /*
2604 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2605 * 5705 A0 and A1 chips.
2606 */
2607 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2608 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2609 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2610 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
2611 sc->bge_phy_flags |= BGE_PHY_WIRESPEED;
2612
2613 if (bge_has_eaddr(sc))
2614 sc->bge_flags |= BGE_FLAG_EADDR;
2615
2616 /* Save chipset family. */
2617 switch (sc->bge_asicrev) {
2618 case BGE_ASICREV_BCM5755:
2619 case BGE_ASICREV_BCM5761:
2620 case BGE_ASICREV_BCM5784:
2621 case BGE_ASICREV_BCM5785:
2622 case BGE_ASICREV_BCM5787:
2623 case BGE_ASICREV_BCM57780:
2624 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2625 BGE_FLAG_5705_PLUS;
2626 break;
2627 case BGE_ASICREV_BCM5700:
2628 case BGE_ASICREV_BCM5701:
2629 case BGE_ASICREV_BCM5703:
2630 case BGE_ASICREV_BCM5704:
2631 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2632 break;
2633 case BGE_ASICREV_BCM5714_A0:
2634 case BGE_ASICREV_BCM5780:
2635 case BGE_ASICREV_BCM5714:
2636 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2637 /* FALLTHROUGH */
2638 case BGE_ASICREV_BCM5750:
2639 case BGE_ASICREV_BCM5752:
2640 case BGE_ASICREV_BCM5906:
2641 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2642 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
2643 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2644 /* FALLTHROUGH */
2645 case BGE_ASICREV_BCM5705:
2646 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2647 break;
2648 }
2649
2650 /* Set various PHY bug flags. */
2651 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2652 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2653 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2654 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2655 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2656 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2657 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2658 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2659 if (pci_get_subvendor(dev) == DELL_VENDORID)
2660 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2661 if ((BGE_IS_5705_PLUS(sc)) &&
2662 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2663 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2664 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2665 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2666 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2667 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2668 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2669 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2670 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2671 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2672 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2673 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2674 } else
2675 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2676 }
2677
2678 /* Identify the chips that use a CPMU. */
2679 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2680 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2681 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2682 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2683 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2684 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2685 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2686 else
2687 sc->bge_mi_mode = BGE_MIMODE_BASE;
2688 /* Enable auto polling for BCM570[0-5]. */
2689 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2690 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2691
2692 /*
2693 * All controllers that are not 5755 or higher have a 4GB
2694 * boundary DMA bug.
2695 * Whenever an address crosses a multiple of the 4GB boundary
2696 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2697 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2698 * state machine will lock up and cause the device to hang.
2699 */
2700 if (BGE_IS_5755_PLUS(sc) == 0)
2701 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2702
2703 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2704 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2705 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2706 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2707 sc->bge_flags |= BGE_FLAG_5788;
2708 }
2709
2710 /*
2711 * Some controllers seem to require special firmware to use
2712 * TSO. The firmware is not available to FreeBSD, and Linux
2713 * claims that the TSO performed by the firmware is slower than
2714 * hardware-based TSO. Moreover, the firmware-based TSO has a
2715 * known bug: it can't handle TSO if the Ethernet header plus
2716 * the IP/TCP header is greater than 80 bytes. A workaround for
2717 * the TSO bug exists, but it seems more expensive than not
2718 * using TSO at all. Some hardware also has the TSO bug, so
2719 * limit TSO to the controllers that are not affected by TSO
2720 * issues (e.g. 5755 or higher).
2721 */
2722 if (BGE_IS_5755_PLUS(sc)) {
2723 /*
2724 * BCM5754 and BCM5787 share the same ASIC ID, so an
2725 * explicit device ID check is required.
2726 * For unknown reasons, TSO does not work on BCM5755M.
2727 */
2728 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2729 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2730 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
2731 sc->bge_flags |= BGE_FLAG_TSO;
2732 }
2733
2734 /*
2735 * Check if this is a PCI-X or PCI Express device.
2736 */
2737 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2738 /*
2739 * Found a PCI Express capabilities register; this
2740 * must be a PCI Express device.
2741 */
2742 sc->bge_flags |= BGE_FLAG_PCIE;
2743 sc->bge_expcap = reg;
2744 if (pci_get_max_read_req(dev) != 4096)
2745 pci_set_max_read_req(dev, 4096);
2746 } else {
2747 /*
2748 * Check if the device is in PCI-X Mode.
2749 * (This bit is not valid on PCI Express controllers.)
2750 */
2751 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
2752 sc->bge_pcixcap = reg;
2753 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2754 BGE_PCISTATE_PCI_BUSMODE) == 0)
2755 sc->bge_flags |= BGE_FLAG_PCIX;
2756 }
2757
2758 /*
2759 * The 40bit DMA bug applies to the 5714/5715 controllers and is
2760 * not actually a MAC controller bug but an issue with the embedded
2761 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
2762 */
2763 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2764 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2765 /*
2766 * Allocate the interrupt, using MSI if possible. These devices
2767 * support 8 MSI messages, but only the first one is used in
2768 * normal operation.
2769 */
2770 rid = 0;
2771 if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
2772 sc->bge_msicap = reg;
2773 if (bge_can_use_msi(sc)) {
2774 msicount = pci_msi_count(dev);
2775 if (msicount > 1)
2776 msicount = 1;
2777 } else
2778 msicount = 0;
2779 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2780 rid = 1;
2781 sc->bge_flags |= BGE_FLAG_MSI;
2782 }
2783 }
2784
2785 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2786 RF_SHAREABLE | RF_ACTIVE);
2787
2788 if (sc->bge_irq == NULL) {
2789 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2790 error = ENXIO;
2791 goto fail;
2792 }
2793
2794 device_printf(dev,
2795 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2796 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2797 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2798 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2799
2800 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2801
2802 /* Try to reset the chip. */
2803 if (bge_reset(sc)) {
2804 device_printf(sc->bge_dev, "chip reset failed\n");
2805 error = ENXIO;
2806 goto fail;
2807 }
2808
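	/*
	 * Check whether ASF management firmware is active by looking for
	 * the firmware signature and the ASF bit in the NIC configuration
	 * word; if it is, remember to hand-shake with the firmware around
	 * resets (using the newer handshake on 575X and later parts).
	 */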
2809 sc->bge_asf_mode = 0;
2810 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2811 == BGE_MAGIC_NUMBER)) {
2812 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2813 & BGE_HWCFG_ASF) {
2814 sc->bge_asf_mode |= ASF_ENABLE;
2815 sc->bge_asf_mode |= ASF_STACKUP;
2816 if (BGE_IS_575X_PLUS(sc))
2817 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2818 }
2819 }
2820
2821 /* Try to reset the chip again the nice way. */
2822 bge_stop_fw(sc);
2823 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2824 if (bge_reset(sc)) {
2825 device_printf(sc->bge_dev, "chip reset failed\n");
2826 error = ENXIO;
2827 goto fail;
2828 }
2829
2830 bge_sig_legacy(sc, BGE_RESET_STOP);
2831 bge_sig_post_reset(sc, BGE_RESET_STOP);
2832
2833 if (bge_chipinit(sc)) {
2834 device_printf(sc->bge_dev, "chip initialization failed\n");
2835 error = ENXIO;
2836 goto fail;
2837 }
2838
2839 error = bge_get_eaddr(sc, eaddr);
2840 if (error) {
2841 device_printf(sc->bge_dev,
2842 "failed to read station address\n");
2843 error = ENXIO;
2844 goto fail;
2845 }
2846
2847 /* 5705 limits RX return ring to 512 entries. */
2848 if (BGE_IS_5705_PLUS(sc))
2849 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2850 else
2851 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2852
2853 if (bge_dma_alloc(sc)) {
2854 device_printf(sc->bge_dev,
2855 "failed to allocate DMA resources\n");
2856 error = ENXIO;
2857 goto fail;
2858 }
2859
2860 bge_add_sysctls(sc);
2861
2862 /* Set default tuneable values. */
2863 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2864 sc->bge_rx_coal_ticks = 150;
2865 sc->bge_tx_coal_ticks = 150;
2866 sc->bge_rx_max_coal_bds = 10;
2867 sc->bge_tx_max_coal_bds = 10;
2868
2869 /* Initialize checksum features to use. */
2870 sc->bge_csum_features = BGE_CSUM_FEATURES;
2871 if (sc->bge_forced_udpcsum != 0)
2872 sc->bge_csum_features |= CSUM_UDP;
2873
2874 /* Set up ifnet structure */
2875 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2876 if (ifp == NULL) {
2877 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2878 error = ENXIO;
2879 goto fail;
2880 }
2881 ifp->if_softc = sc;
2882 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2883 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2884 ifp->if_ioctl = bge_ioctl;
2885 ifp->if_start = bge_start;
2886 ifp->if_init = bge_init;
2887 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2888 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2889 IFQ_SET_READY(&ifp->if_snd);
2890 ifp->if_hwassist = sc->bge_csum_features;
2891 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2892 IFCAP_VLAN_MTU;
2893 if ((sc->bge_flags & BGE_FLAG_TSO) != 0) {
2894 ifp->if_hwassist |= CSUM_TSO;
2895 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
2896 }
2897#ifdef IFCAP_VLAN_HWCSUM
2898 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2899#endif
2900 ifp->if_capenable = ifp->if_capabilities;
2901#ifdef DEVICE_POLLING
2902 ifp->if_capabilities |= IFCAP_POLLING;
2903#endif
2904
2905 /*
2906 * 5700 B0 chips do not support checksumming correctly due
2907 * to hardware bugs.
2908 */
2909 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2910 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2911 ifp->if_capenable &= ~IFCAP_HWCSUM;
2912 ifp->if_hwassist = 0;
2913 }
2914
2915 /*
2916 * Figure out what sort of media we have by checking the
2917 * hardware config word in the first 32k of NIC internal memory,
2918 * or fall back to examining the EEPROM if necessary.
2919 * Note: on some BCM5700 cards, this value appears to be unset.
2920 * If that's the case, we have to rely on identifying the NIC
2921 * by its PCI subsystem ID, as we do below for the SysKonnect
2922 * SK-9D41.
2923 */
2924 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2925 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2926 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
2927 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2928 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2929 sizeof(hwcfg))) {
2930 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2931 error = ENXIO;
2932 goto fail;
2933 }
2934 hwcfg = ntohl(hwcfg);
2935 }
2936
2937 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2938 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
2939 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2940 if (BGE_IS_5714_FAMILY(sc))
2941 sc->bge_flags |= BGE_FLAG_MII_SERDES;
2942 else
2943 sc->bge_flags |= BGE_FLAG_TBI;
2944 }
2945
2946 if (sc->bge_flags & BGE_FLAG_TBI) {
2947 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2948 bge_ifmedia_sts);
2949 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
2950 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2951 0, NULL);
2952 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
2953 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2954 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2955 } else {
2956		/*
2957		 * Do transceiver setup and tell the firmware the
2958		 * driver is down so we can try to get access to
2959		 * probe the PHY if ASF is running.  Retry a couple
2960		 * of times if we get a conflict with the ASF firmware
2961		 * accessing the PHY.
2962		 */
2963 trys = 0;
2964 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2965again:
2966 bge_asf_driver_up(sc);
2967
2968 error = (mii_attach(dev, &sc->bge_miibus, ifp,
2969 bge_ifmedia_upd, bge_ifmedia_sts, BMSR_DEFCAPMASK,
2970 phy_addr, MII_OFFSET_ANY, 0));
2971 if (error != 0) {
2972 if (trys++ < 4) {
2973 device_printf(sc->bge_dev, "Try again\n");
2974 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
2975 BMCR_RESET);
2976 goto again;
2977 }
2978 device_printf(sc->bge_dev, "attaching PHYs failed\n");
2979 goto fail;
2980 }
2981
2982		/*
2983		 * Now tell the firmware we are going up after probing the PHY.
2984		 */
2985 if (sc->bge_asf_mode & ASF_STACKUP)
2986 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2987 }
2988
2989 /*
2990 * When using the BCM5701 in PCI-X mode, data corruption has
2991 * been observed in the first few bytes of some received packets.
2992 * Aligning the packet buffer in memory eliminates the corruption.
2993 * Unfortunately, this misaligns the packet payloads. On platforms
2994 * which do not support unaligned accesses, we will realign the
2995 * payloads by copying the received packets.
2996 */
2997 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2998 sc->bge_flags & BGE_FLAG_PCIX)
2999 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3000
3001 /*
3002 * Call MI attach routine.
3003 */
3004 ether_ifattach(ifp, eaddr);
3005 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3006
3007 /* Tell upper layer we support long frames. */
3008 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3009
3010 /*
3011 * Hookup IRQ last.
3012 */
3013#if __FreeBSD_version > 700030
3014 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3015 /* Take advantage of single-shot MSI. */
3016 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3017 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3018 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3019 taskqueue_thread_enqueue, &sc->bge_tq);
3020 if (sc->bge_tq == NULL) {
3021 device_printf(dev, "could not create taskqueue.\n");
3022 ether_ifdetach(ifp);
3023 error = ENXIO;
3024 goto fail;
3025 }
3026 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3027 device_get_nameunit(sc->bge_dev));
3028 error = bus_setup_intr(dev, sc->bge_irq,
3029 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3030 &sc->bge_intrhand);
3031 if (error)
3032 ether_ifdetach(ifp);
3033 } else
3034 error = bus_setup_intr(dev, sc->bge_irq,
3035 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3036 &sc->bge_intrhand);
3037#else
3038 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
3039 bge_intr, sc, &sc->bge_intrhand);
3040#endif
3041
3042 if (error) {
3043 bge_detach(dev);
3044 device_printf(sc->bge_dev, "couldn't set up irq\n");
3045 }
3046
3047 return (0);
3048
3049fail:
3050 bge_release_resources(sc);
3051
3052 return (error);
3053}
3054
3055static int
3056bge_detach(device_t dev)
3057{
3058 struct bge_softc *sc;
3059 struct ifnet *ifp;
3060
3061 sc = device_get_softc(dev);
3062 ifp = sc->bge_ifp;
3063
3064#ifdef DEVICE_POLLING
3065 if (ifp->if_capenable & IFCAP_POLLING)
3066 ether_poll_deregister(ifp);
3067#endif
3068
3069 BGE_LOCK(sc);
3070 bge_stop(sc);
3071 bge_reset(sc);
3072 BGE_UNLOCK(sc);
3073
3074 callout_drain(&sc->bge_stat_ch);
3075
3076 if (sc->bge_tq)
3077 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3078 ether_ifdetach(ifp);
3079
3080 if (sc->bge_flags & BGE_FLAG_TBI) {
3081 ifmedia_removeall(&sc->bge_ifmedia);
3082 } else {
3083 bus_generic_detach(dev);
3084 device_delete_child(dev, sc->bge_miibus);
3085 }
3086
3087 bge_release_resources(sc);
3088
3089 return (0);
3090}
3091
3092static void
3093bge_release_resources(struct bge_softc *sc)
3094{
3095 device_t dev;
3096
3097 dev = sc->bge_dev;
3098
3099 if (sc->bge_tq != NULL)
3100 taskqueue_free(sc->bge_tq);
3101
3102 if (sc->bge_intrhand != NULL)
3103 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3104
3105 if (sc->bge_irq != NULL)
3106 bus_release_resource(dev, SYS_RES_IRQ,
3107 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3108
3109 if (sc->bge_flags & BGE_FLAG_MSI)
3110 pci_release_msi(dev);
3111
3112 if (sc->bge_res != NULL)
3113 bus_release_resource(dev, SYS_RES_MEMORY,
3114 PCIR_BAR(0), sc->bge_res);
3115
3116 if (sc->bge_ifp != NULL)
3117 if_free(sc->bge_ifp);
3118
3119 bge_dma_free(sc);
3120
3121 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3122 BGE_LOCK_DESTROY(sc);
3123}
3124
3125static int
3126bge_reset(struct bge_softc *sc)
3127{
3128 device_t dev;
3129 uint32_t cachesize, command, pcistate, reset, val;
3130 void (*write_op)(struct bge_softc *, int, int);
3131 uint16_t devctl;
3132 int i;
3133
3134 dev = sc->bge_dev;
3135
3136 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3137 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3138 if (sc->bge_flags & BGE_FLAG_PCIE)
3139 write_op = bge_writemem_direct;
3140 else
3141 write_op = bge_writemem_ind;
3142 } else
3143 write_op = bge_writereg_ind;
3144
3145 /* Save some important PCI state. */
3146 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3147 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3148 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3149
3150 pci_write_config(dev, BGE_PCI_MISC_CTL,
3151 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3152 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3153
3154 /* Disable fastboot on controllers that support it. */
3155 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3156 BGE_IS_5755_PLUS(sc)) {
3157 if (bootverbose)
3158 device_printf(dev, "Disabling fastboot\n");
3159 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3160 }
3161
3162 /*
3163 * Write the magic number to SRAM at offset 0xB50.
3164 * When firmware finishes its initialization it will
3165 * write ~BGE_MAGIC_NUMBER to the same location.
3166 */
3167 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3168
3169 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3170
3171 /* XXX: Broadcom Linux driver. */
3172 if (sc->bge_flags & BGE_FLAG_PCIE) {
3173 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3174 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3175 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3176 /* Prevent PCIE link training during global reset */
3177 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3178 reset |= 1 << 29;
3179 }
3180 }
3181
3182 /*
3183 * Set GPHY Power Down Override to leave GPHY
3184 * powered up in D0 uninitialized.
3185 */
3186 if (BGE_IS_5705_PLUS(sc))
3187 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3188
3189 /* Issue global reset */
3190 write_op(sc, BGE_MISC_CFG, reset);
3191
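	/*
	 * The BCM5906 uses an internal CPU (VCPU) instead of the usual
	 * SRAM firmware handshake; flag a driver-initiated reset and
	 * release the CPU from halt so it can run its initialization.
	 */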
3192 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3193 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3194 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3195 val | BGE_VCPU_STATUS_DRV_RESET);
3196 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3197 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3198 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3199 }
3200
3201 DELAY(1000);
3202
3203 /* XXX: Broadcom Linux driver. */
3204 if (sc->bge_flags & BGE_FLAG_PCIE) {
3205 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3206 DELAY(500000); /* wait for link training to complete */
3207 val = pci_read_config(dev, 0xC4, 4);
3208 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3209 }
3210 devctl = pci_read_config(dev,
3211 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3212		/* Clear the no snoop and relaxed ordering enable bits. */
3213 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3214 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3215 /* Set PCIE max payload size to 128. */
3216 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3217 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3218 devctl, 2);
3219 /* Clear error status. */
3220 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3221 PCIM_EXP_STA_CORRECTABLE_ERROR |
3222 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3223 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3224 }
3225
3226 /* Reset some of the PCI state that got zapped by reset. */
3227 pci_write_config(dev, BGE_PCI_MISC_CTL,
3228 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3229 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3230 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3231 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3232 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3233	/*
3234	 * Disable PCI-X relaxed ordering to ensure the status block
3235	 * update arrives before the packet buffer DMA. Otherwise the
3236	 * driver may read a stale status block.
3237	 */
3238 if (sc->bge_flags & BGE_FLAG_PCIX) {
3239 devctl = pci_read_config(dev,
3240 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3241 devctl &= ~PCIXM_COMMAND_ERO;
3242 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3243 devctl &= ~PCIXM_COMMAND_MAX_READ;
3244 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3245 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3246 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3247 PCIXM_COMMAND_MAX_READ);
3248 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3249 }
3250 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3251 devctl, 2);
3252 }
3253	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
3254 if (BGE_IS_5714_FAMILY(sc)) {
3255 /* This chip disables MSI on reset. */
3256 if (sc->bge_flags & BGE_FLAG_MSI) {
3257 val = pci_read_config(dev,
3258 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3259 pci_write_config(dev,
3260 sc->bge_msicap + PCIR_MSI_CTRL,
3261 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3262 val = CSR_READ_4(sc, BGE_MSI_MODE);
3263 CSR_WRITE_4(sc, BGE_MSI_MODE,
3264 val | BGE_MSIMODE_ENABLE);
3265 }
3266 val = CSR_READ_4(sc, BGE_MARB_MODE);
3267 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3268 } else
3269 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3270
3271 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3272 for (i = 0; i < BGE_TIMEOUT; i++) {
3273 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3274 if (val & BGE_VCPU_STATUS_INIT_DONE)
3275 break;
3276 DELAY(100);
3277 }
3278 if (i == BGE_TIMEOUT) {
3279 device_printf(dev, "reset timed out\n");
3280 return (1);
3281 }
3282 } else {
3283		/*
3284		 * Poll until we see the 1's complement of the magic number.
3285		 * This indicates that the firmware initialization is complete.
3286		 * We expect this to fail, though, if no chip containing the
3287		 * Ethernet address is fitted.
3288		 */
3289 for (i = 0; i < BGE_TIMEOUT; i++) {
3290 DELAY(10);
3291 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3292 if (val == ~BGE_MAGIC_NUMBER)
3293 break;
3294 }
3295
3296 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3297 device_printf(dev,
3298 "firmware handshake timed out, found 0x%08x\n",
3299 val);
3300 }
3301
3302 /*
3303 * XXX Wait for the value of the PCISTATE register to
3304 * return to its original pre-reset state. This is a
3305 * fairly good indicator of reset completion. If we don't
3306 * wait for the reset to fully complete, trying to read
3307 * from the device's non-PCI registers may yield garbage
3308 * results.
3309 */
3310 for (i = 0; i < BGE_TIMEOUT; i++) {
3311 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3312 break;
3313 DELAY(10);
3314 }
3315
3316 /* Fix up byte swapping. */
3317 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3318 BGE_MODECTL_BYTESWAP_DATA);
3319
3320 /* Tell the ASF firmware we are up */
3321 if (sc->bge_asf_mode & ASF_STACKUP)
3322 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3323
3324 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3325
3326 /*
3327 * The 5704 in TBI mode apparently needs some special
3328	 * adjustment to ensure the SERDES drive level is set
3329 * to 1.2V.
3330 */
3331 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3332 sc->bge_flags & BGE_FLAG_TBI) {
3333 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3334 val = (val & ~0xFFF) | 0x880;
3335 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3336 }
3337
3338 /* XXX: Broadcom Linux driver. */
3339 if (sc->bge_flags & BGE_FLAG_PCIE &&
3340 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3341 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3342 /* Enable Data FIFO protection. */
3343 val = CSR_READ_4(sc, 0x7C00);
3344 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3345 }
3346 DELAY(10000);
3347
3348 return (0);
3349}
3350
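/*
 * Requeue an Rx buffer whose mbuf could not be handed up (receive error
 * or replacement allocation failure) by rewriting its descriptor back
 * into the standard receive ring.  bge_rxreuse_jumbo() below does the
 * same for the jumbo ring.
 */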
3351static __inline void
3352bge_rxreuse_std(struct bge_softc *sc, int i)
3353{
3354 struct bge_rx_bd *r;
3355
3356 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3357 r->bge_flags = BGE_RXBDFLAG_END;
3358 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3359 r->bge_idx = i;
3360 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3361}
3362
3363static __inline void
3364bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3365{
3366 struct bge_extrx_bd *r;
3367
3368 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3369 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3370 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3371 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3372 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3373 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3374 r->bge_idx = i;
3375 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3376}
3377
3378/*
3379 * Frame reception handling. This is called if there's a frame
3380 * on the receive return list.
3381 *
3382 * Note: we have to be able to handle two possibilities here:
3383 * 1) the frame is from the jumbo receive ring
3384 * 2) the frame is from the standard receive ring
3385 */
3386
3387static int
3388bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3389{
3390 struct ifnet *ifp;
3391 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3392 uint16_t rx_cons;
3393
3394 rx_cons = sc->bge_rx_saved_considx;
3395
3396 /* Nothing to do. */
3397 if (rx_cons == rx_prod)
3398 return (rx_npkts);
3399
3400 ifp = sc->bge_ifp;
3401
3402 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3403 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3404 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3405 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3406 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3407 (MCLBYTES - ETHER_ALIGN))
3408 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3409 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3410
3411 while (rx_cons != rx_prod) {
3412 struct bge_rx_bd *cur_rx;
3413 uint32_t rxidx;
3414 struct mbuf *m = NULL;
3415 uint16_t vlan_tag = 0;
3416 int have_tag = 0;
3417
3418#ifdef DEVICE_POLLING
3419 if (ifp->if_capenable & IFCAP_POLLING) {
3420 if (sc->rxcycles <= 0)
3421 break;
3422 sc->rxcycles--;
3423 }
3424#endif
3425
3426 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3427
3428 rxidx = cur_rx->bge_idx;
3429 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3430
3431 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3432 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3433 have_tag = 1;
3434 vlan_tag = cur_rx->bge_vlan_tag;
3435 }
3436
3437 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3438 jumbocnt++;
3439 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3440 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3441 bge_rxreuse_jumbo(sc, rxidx);
3442 continue;
3443 }
3444 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3445 bge_rxreuse_jumbo(sc, rxidx);
3446 ifp->if_iqdrops++;
3447 continue;
3448 }
3449 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3450 } else {
3451 stdcnt++;
3452 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3453 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3454 bge_rxreuse_std(sc, rxidx);
3455 continue;
3456 }
3457 if (bge_newbuf_std(sc, rxidx) != 0) {
3458 bge_rxreuse_std(sc, rxidx);
3459 ifp->if_iqdrops++;
3460 continue;
3461 }
3462 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3463 }
3464
3465 ifp->if_ipackets++;
3466#ifndef __NO_STRICT_ALIGNMENT
3467 /*
3468 * For architectures with strict alignment we must make sure
3469 * the payload is aligned.
3470 */
3471 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3472 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3473 cur_rx->bge_len);
3474 m->m_data += ETHER_ALIGN;
3475 }
3476#endif
3477 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3478 m->m_pkthdr.rcvif = ifp;
3479
3480 if (ifp->if_capenable & IFCAP_RXCSUM) {
3481 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3482 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3483 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3484 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3485 }
3486 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3487 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3488 m->m_pkthdr.csum_data =
3489 cur_rx->bge_tcp_udp_csum;
3490 m->m_pkthdr.csum_flags |=
3491 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
3492 }
3493 }
3494
3495 /*
3496 * If we received a packet with a vlan tag,
3497 * attach that information to the packet.
3498 */
3499 if (have_tag) {
3500#if __FreeBSD_version > 700022
3501 m->m_pkthdr.ether_vtag = vlan_tag;
3502 m->m_flags |= M_VLANTAG;
3503#else
3504 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3505 if (m == NULL)
3506 continue;
3507#endif
3508 }
3509
3510 if (holdlck != 0) {
3511 BGE_UNLOCK(sc);
3512 (*ifp->if_input)(ifp, m);
3513 BGE_LOCK(sc);
3514 } else
3515 (*ifp->if_input)(ifp, m);
3516 rx_npkts++;
3517
3518 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3519 return (rx_npkts);
3520 }
3521
3522 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3523 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3524 if (stdcnt > 0)
3525 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3526 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3527
3528 if (jumbocnt > 0)
3529 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3530 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3531
3532 sc->bge_rx_saved_considx = rx_cons;
3533 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
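	/*
	 * The std/jumbo producer mailboxes below expect the index of the
	 * last descriptor we initialized, hence the "+ ring size - 1"
	 * modulo arithmetic applied to the next-free index.
	 */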
3534 if (stdcnt)
3535 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3536 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3537 if (jumbocnt)
3538 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3539 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3540#ifdef notyet
3541 /*
3542 * This register wraps very quickly under heavy packet drops.
3543 * If you need correct statistics, you can enable this check.
3544 */
3545 if (BGE_IS_5705_PLUS(sc))
3546 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3547#endif
3548 return (rx_npkts);
3549}
3550
3551static void
3552bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3553{
3554 struct bge_tx_bd *cur_tx;
3555 struct ifnet *ifp;
3556
3557 BGE_LOCK_ASSERT(sc);
3558
3559 /* Nothing to do. */
3560 if (sc->bge_tx_saved_considx == tx_cons)
3561 return;
3562
3563 ifp = sc->bge_ifp;
3564
3565 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3566 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3567 /*
3568 * Go through our tx ring and free mbufs for those
3569 * frames that have been sent.
3570 */
3571 while (sc->bge_tx_saved_considx != tx_cons) {
3572 uint32_t idx;
3573
3574 idx = sc->bge_tx_saved_considx;
3575 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3576 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3577 ifp->if_opackets++;
3578 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3579 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3580 sc->bge_cdata.bge_tx_dmamap[idx],
3581 BUS_DMASYNC_POSTWRITE);
3582 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3583 sc->bge_cdata.bge_tx_dmamap[idx]);
3584 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3585 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3586 }
3587 sc->bge_txcnt--;
3588 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3589 }
3590
3591 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3592 if (sc->bge_txcnt == 0)
3593 sc->bge_timer = 0;
3594}
3595
3596#ifdef DEVICE_POLLING
3597static int
3598bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3599{
3600 struct bge_softc *sc = ifp->if_softc;
3601 uint16_t rx_prod, tx_cons;
3602 uint32_t statusword;
3603 int rx_npkts = 0;
3604
3605 BGE_LOCK(sc);
3606 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3607 BGE_UNLOCK(sc);
3608 return (rx_npkts);
3609 }
3610
3611 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3612 sc->bge_cdata.bge_status_map,
3613 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3614 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3615 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3616
3617 statusword = sc->bge_ldata.bge_status_block->bge_status;
3618 sc->bge_ldata.bge_status_block->bge_status = 0;
3619
3620 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3621 sc->bge_cdata.bge_status_map,
3622 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3623
3624 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3625 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3626 sc->bge_link_evt++;
3627
3628 if (cmd == POLL_AND_CHECK_STATUS)
3629 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3630 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3631 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3632 bge_link_upd(sc);
3633
3634 sc->rxcycles = count;
3635 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3636 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3637 BGE_UNLOCK(sc);
3638 return (rx_npkts);
3639 }
3640 bge_txeof(sc, tx_cons);
3641 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3642 bge_start_locked(ifp);
3643
3644 BGE_UNLOCK(sc);
3645 return (rx_npkts);
3646}
3647#endif /* DEVICE_POLLING */
3648
3649static int
3650bge_msi_intr(void *arg)
3651{
3652 struct bge_softc *sc;
3653
3654 sc = (struct bge_softc *)arg;
3655	/*
3656	 * This interrupt is not shared, and the controller has
3657	 * already disabled further interrupts.
3658	 */
3659 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3660 return (FILTER_HANDLED);
3661}
3662
3663static void
3664bge_intr_task(void *arg, int pending)
3665{
3666 struct bge_softc *sc;
3667 struct ifnet *ifp;
3668 uint32_t status;
3669 uint16_t rx_prod, tx_cons;
3670
3671 sc = (struct bge_softc *)arg;
3672 ifp = sc->bge_ifp;
3673
3674 BGE_LOCK(sc);
3675 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3676 BGE_UNLOCK(sc);
3677 return;
3678 }
3679
3680 /* Get updated status block. */
3681 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3682 sc->bge_cdata.bge_status_map,
3683 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3684
3685	/* Save producer/consumer indexes. */
3686 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3687 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3688 status = sc->bge_ldata.bge_status_block->bge_status;
3689 sc->bge_ldata.bge_status_block->bge_status = 0;
3690 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3691 sc->bge_cdata.bge_status_map,
3692 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3693
3694 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
3695 bge_link_upd(sc);
3696
3697 /* Let controller work. */
3698 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3699
3700 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3701 sc->bge_rx_saved_considx != rx_prod) {
3702 /* Check RX return ring producer/consumer. */
3703 BGE_UNLOCK(sc);
3704 bge_rxeof(sc, rx_prod, 0);
3705 BGE_LOCK(sc);
3706 }
3707 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3708 /* Check TX ring producer/consumer. */
3709 bge_txeof(sc, tx_cons);
3710 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3711 bge_start_locked(ifp);
3712 }
3713 BGE_UNLOCK(sc);
3714}
3715
3716static void
3717bge_intr(void *xsc)
3718{
3719 struct bge_softc *sc;
3720 struct ifnet *ifp;
3721 uint32_t statusword;
3722 uint16_t rx_prod, tx_cons;
3723
3724 sc = xsc;
3725
3726 BGE_LOCK(sc);
3727
3728 ifp = sc->bge_ifp;
3729
3730#ifdef DEVICE_POLLING
3731 if (ifp->if_capenable & IFCAP_POLLING) {
3732 BGE_UNLOCK(sc);
3733 return;
3734 }
3735#endif
3736
3737 /*
3738 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3739 * disable interrupts by writing nonzero like we used to, since with
3740 * our current organization this just gives complications and
3741 * pessimizations for re-enabling interrupts. We used to have races
3742 * instead of the necessary complications. Disabling interrupts
3743 * would just reduce the chance of a status update while we are
3744 * running (by switching to the interrupt-mode coalescence
3745 * parameters), but this chance is already very low so it is more
3746 * efficient to get another interrupt than prevent it.
3747 *
3748 * We do the ack first to ensure another interrupt if there is a
3749 * status update after the ack. We don't check for the status
3750 * changing later because it is more efficient to get another
3751 * interrupt than prevent it, not quite as above (not checking is
3752 * a smaller optimization than not toggling the interrupt enable,
3753	 * since checking doesn't involve PCI accesses and toggling requires
3754 * the status check). So toggling would probably be a pessimization
3755 * even with MSI. It would only be needed for using a task queue.
3756 */
3757 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3758
3759 /*
3760 * Do the mandatory PCI flush as well as get the link status.
3761 */
3762 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3763
3764 /* Make sure the descriptor ring indexes are coherent. */
3765 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3766 sc->bge_cdata.bge_status_map,
3767 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3768 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3769 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3770 sc->bge_ldata.bge_status_block->bge_status = 0;
3771 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3772 sc->bge_cdata.bge_status_map,
3773 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3774
3775 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3776 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3777 statusword || sc->bge_link_evt)
3778 bge_link_upd(sc);
3779
3780 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3781 /* Check RX return ring producer/consumer. */
3782 bge_rxeof(sc, rx_prod, 1);
3783 }
3784
3785 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3786 /* Check TX ring producer/consumer. */
3787 bge_txeof(sc, tx_cons);
3788 }
3789
3790 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3791 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3792 bge_start_locked(ifp);
3793
3794 BGE_UNLOCK(sc);
3795}
3796
3797static void
3798bge_asf_driver_up(struct bge_softc *sc)
3799{
3800 if (sc->bge_asf_mode & ASF_STACKUP) {
3801		/* Send an ASF heartbeat approx. every 2 seconds. */
3802 if (sc->bge_asf_count)
3803 sc->bge_asf_count --;
3804 else {
3805 sc->bge_asf_count = 2;
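			/*
			 * Post a "driver alive" event in the firmware
			 * mailbox and poke the CPU event register so the
			 * ASF/IPMI firmware notices it.
			 */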
3806 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3807 BGE_FW_DRV_ALIVE);
3808 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3809 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3810 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3811 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3812 }
3813 }
3814}
3815
3816static void
3817bge_tick(void *xsc)
3818{
3819 struct bge_softc *sc = xsc;
3820 struct mii_data *mii = NULL;
3821
3822 BGE_LOCK_ASSERT(sc);
3823
3824 /* Synchronize with possible callout reset/stop. */
3825 if (callout_pending(&sc->bge_stat_ch) ||
3826 !callout_active(&sc->bge_stat_ch))
3827 return;
3828
3829 if (BGE_IS_5705_PLUS(sc))
3830 bge_stats_update_regs(sc);
3831 else
3832 bge_stats_update(sc);
3833
3834 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3835 mii = device_get_softc(sc->bge_miibus);
3836		/*
3837		 * Do not touch the PHY if we have link up. This could break
3838		 * IPMI/ASF mode or produce extra input errors
3839		 * (extra errors were reported for bcm5701 & bcm5704).
3840		 */
3841 if (!sc->bge_link)
3842 mii_tick(mii);
3843 } else {
3844		/*
3845		 * Since auto-polling can't be used in TBI mode, we have to
3846		 * poll the link status manually. Here we register a pending
3847		 * link event and trigger an interrupt.
3848		 */
3849#ifdef DEVICE_POLLING
3850 /* In polling mode we poll link state in bge_poll(). */
3851 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3852#endif
3853 {
3854 sc->bge_link_evt++;
3855 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3856 sc->bge_flags & BGE_FLAG_5788)
3857 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3858 else
3859 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3860 }
3861 }
3862
3863 bge_asf_driver_up(sc);
3864 bge_watchdog(sc);
3865
3866 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3867}
3868
3869static void
3870bge_stats_update_regs(struct bge_softc *sc)
3871{
3872 struct ifnet *ifp;
3873 struct bge_mac_stats *stats;
3874
3875 ifp = sc->bge_ifp;
3876 stats = &sc->bge_mac_stats;
3877
3878 stats->ifHCOutOctets +=
3879 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
3880 stats->etherStatsCollisions +=
3881 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
3882 stats->outXonSent +=
3883 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
3884 stats->outXoffSent +=
3885 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
3886 stats->dot3StatsInternalMacTransmitErrors +=
3887 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
3888 stats->dot3StatsSingleCollisionFrames +=
3889 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
3890 stats->dot3StatsMultipleCollisionFrames +=
3891 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
3892 stats->dot3StatsDeferredTransmissions +=
3893 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
3894 stats->dot3StatsExcessiveCollisions +=
3895 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
3896 stats->dot3StatsLateCollisions +=
3897 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
3898 stats->ifHCOutUcastPkts +=
3899 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
3900 stats->ifHCOutMulticastPkts +=
3901 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
3902 stats->ifHCOutBroadcastPkts +=
3903 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
3904
3905 stats->ifHCInOctets +=
3906 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
3907 stats->etherStatsFragments +=
3908 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
3909 stats->ifHCInUcastPkts +=
3910 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
3911 stats->ifHCInMulticastPkts +=
3912 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
3913 stats->ifHCInBroadcastPkts +=
3914 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
3915 stats->dot3StatsFCSErrors +=
3916 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
3917 stats->dot3StatsAlignmentErrors +=
3918 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
3919 stats->xonPauseFramesReceived +=
3920 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
3921 stats->xoffPauseFramesReceived +=
3922 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
3923 stats->macControlFramesReceived +=
3924 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
3925 stats->xoffStateEntered +=
3926 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
3927 stats->dot3StatsFramesTooLong +=
3928 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
3929 stats->etherStatsJabbers +=
3930 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
3931 stats->etherStatsUndersizePkts +=
3932 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
3933
3934 stats->FramesDroppedDueToFilters +=
3935 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
3936 stats->DmaWriteQueueFull +=
3937 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
3938 stats->DmaWriteHighPriQueueFull +=
3939 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
3940 stats->NoMoreRxBDs +=
3941 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3942 stats->InputDiscards +=
3943 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3944 stats->InputErrors +=
3945 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3946 stats->RecvThresholdHit +=
3947 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
3948
3949 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
3950 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
3951 stats->InputErrors);
3952}
3953
3954static void
3955bge_stats_clear_regs(struct bge_softc *sc)
3956{
3957
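	/*
	 * These MAC statistics registers clear on read, so reading each
	 * counter is enough to zero it; bge_stats_update_regs() relies on
	 * the same behaviour to accumulate per-interval deltas.
	 */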
3958 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
3959 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
3960 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
3961 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
3962 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
3963 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
3964 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
3965 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
3966 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
3967 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
3968 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
3969 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
3970 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
3971
3972 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
3973 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
3974 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
3975 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
3976 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
3977 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
3978 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
3979 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
3980 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
3981 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
3982 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
3983 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
3984 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
3985 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
3986
3987 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
3988 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
3989 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
3990 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3991 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3992 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3993 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
3994}
3995
3996static void
3997bge_stats_update(struct bge_softc *sc)
3998{
3999 struct ifnet *ifp;
4000 bus_size_t stats;
4001 uint32_t cnt; /* current register value */
4002
4003 ifp = sc->bge_ifp;
4004
4005 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4006
4007#define READ_STAT(sc, stats, stat) \
4008 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4009
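	/*
	 * Fold in only the delta since the last poll; the uint32_t
	 * arithmetic handles wrap of the 32-bit hardware counters.
	 */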
4010 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4011 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4012 sc->bge_tx_collisions = cnt;
4013
4014 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4015 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4016 sc->bge_rx_discards = cnt;
4017
4018 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4019 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4020 sc->bge_tx_discards = cnt;
4021
4022#undef READ_STAT
4023}
4024
4025/*
4026 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4027 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4028 * but when such padded frames employ the bge IP/TCP checksum offload,
4029 * the hardware checksum assist gives incorrect results (possibly
4030 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4031 * If we pad such runts with zeros, the onboard checksum comes out correct.
4032 */
4033static __inline int
4034bge_cksum_pad(struct mbuf *m)
4035{
4036 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4037 struct mbuf *last;
4038
4039 /* If there's only the packet-header and we can pad there, use it. */
4040 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4041 M_TRAILINGSPACE(m) >= padlen) {
4042 last = m;
4043 } else {
4044 /*
4045 * Walk packet chain to find last mbuf. We will either
4046 * pad there, or append a new mbuf and pad it.
4047 */
4048 for (last = m; last->m_next != NULL; last = last->m_next);
4049 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4050 /* Allocate new empty mbuf, pad it. Compact later. */
4051 struct mbuf *n;
4052
4053 MGET(n, M_DONTWAIT, MT_DATA);
4054 if (n == NULL)
4055 return (ENOBUFS);
4056 n->m_len = 0;
4057 last->m_next = n;
4058 last = n;
4059 }
4060 }
4061
4062 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4063 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4064 last->m_len += padlen;
4065 m->m_pkthdr.len += padlen;
4066
4067 return (0);
4068}
4069
4070static struct mbuf *
4071bge_check_short_dma(struct mbuf *m)
4072{
4073 struct mbuf *n;
4074 int found;
4075
4076	/*
4077	 * If the device receives two back-to-back send BDs with less
4078	 * than or equal to 8 total bytes then the device may hang. The
4079	 * two back-to-back send BDs must be in the same frame for this
4080	 * failure to occur. Scan the mbuf chain and see whether two
4081	 * back-to-back send BDs are there. If this is the case, allocate
4082	 * a new mbuf and copy the frame to work around the silicon bug.
4083	 */
4084 for (n = m, found = 0; n != NULL; n = n->m_next) {
4085 if (n->m_len < 8) {
4086 found++;
4087 if (found > 1)
4088 break;
4089 continue;
4090 }
4091 found = 0;
4092 }
4093
4094 if (found > 1) {
4095 n = m_defrag(m, M_DONTWAIT);
4096 if (n == NULL)
4097 m_freem(m);
4098 } else
4099 n = m;
4100 return (n);
4101}
4102
4103static struct mbuf *
4104bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss)
4105{
4106 struct ip *ip;
4107 struct tcphdr *tcp;
4108 struct mbuf *n;
4109 uint16_t hlen;
4110 uint32_t poff;
4111
4112 if (M_WRITABLE(m) == 0) {
4113 /* Get a writable copy. */
4114 n = m_dup(m, M_DONTWAIT);
4115 m_freem(m);
4116 if (n == NULL)
4117 return (NULL);
4118 m = n;
4119 }
4120 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4121 if (m == NULL)
4122 return (NULL);
4123 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4124 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4125 m = m_pullup(m, poff + sizeof(struct tcphdr));
4126 if (m == NULL)
4127 return (NULL);
4128 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4129 m = m_pullup(m, poff + (tcp->th_off << 2));
4130 if (m == NULL)
4131 return (NULL);
4132	/*
4133	 * The controller does not seem to modify the IP length and TCP pseudo
4134	 * checksum; these checksums, computed by the upper stack, should be 0.
4135	 */
4136 *mss = m->m_pkthdr.tso_segsz;
4137 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4138 ip->ip_sum = 0;
4139 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4140 /* Clear pseudo checksum computed by TCP stack. */
4141 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4142 tcp->th_sum = 0;
4143	/*
4144	 * Broadcom controllers use a different descriptor format for
4145	 * TSO depending on the ASIC revision. Due to TSO-capable firmware
4146	 * licensing issues and the lower performance of firmware-based
4147	 * TSO, we only support hardware-based TSO, which is applicable to
4148	 * BCM5755 or newer controllers. Hardware-based TSO uses the lower
4149	 * 11 bits to store the MSS and the upper 5 bits to store the
4150	 * IP/TCP header length (including IP/TCP options). The header
4151	 * length is expressed in 32-bit units.
4152	 */
4153 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4154 *mss |= (hlen << 11);
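	/*
	 * For example, a 20-byte IP header plus a 32-byte TCP header gives
	 * hlen = (20 + 32) >> 2 = 13, which is OR'd into bits 11-15 above
	 * the MSS value.
	 */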
4155 return (m);
4156}
4157
4158/*
4159 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4160 * pointers to descriptors.
4161 */
4162static int
4163bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4164{
4165 bus_dma_segment_t segs[BGE_NSEG_NEW];
4166 bus_dmamap_t map;
4167 struct bge_tx_bd *d;
4168 struct mbuf *m = *m_head;
4169 uint32_t idx = *txidx;
4170 uint16_t csum_flags, mss, vlan_tag;
4171 int nsegs, i, error;
4172
4173 csum_flags = 0;
4174 mss = 0;
4175 vlan_tag = 0;
4176 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4177 m->m_next != NULL) {
4178 *m_head = bge_check_short_dma(m);
4179 if (*m_head == NULL)
4180 return (ENOBUFS);
4181 m = *m_head;
4182 }
4183 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4184 *m_head = m = bge_setup_tso(sc, m, &mss);
4185 if (*m_head == NULL)
4186 return (ENOBUFS);
4187 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4188 BGE_TXBDFLAG_CPU_POST_DMA;
4189 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4190 if (m->m_pkthdr.csum_flags & CSUM_IP)
4191 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4192 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4193 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4194 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4195 (error = bge_cksum_pad(m)) != 0) {
4196 m_freem(m);
4197 *m_head = NULL;
4198 return (error);
4199 }
4200 }
4201 if (m->m_flags & M_LASTFRAG)
4202 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4203 else if (m->m_flags & M_FRAG)
4204 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4205 }
4206
4207 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
4208 sc->bge_forced_collapse > 0 &&
4209 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4210		/*
4211		 * Forcibly collapse mbuf chains to overcome a hardware
4212		 * limitation which only supports a single outstanding
4213		 * DMA read operation.
4214		 */
4215 if (sc->bge_forced_collapse == 1)
4216 m = m_defrag(m, M_DONTWAIT);
4217 else
4218 m = m_collapse(m, M_DONTWAIT, sc->bge_forced_collapse);
4219 if (m == NULL)
4220 m = *m_head;
4221 *m_head = m;
4222 }
4223
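	/*
	 * Map the mbuf chain for DMA.  If it needs more segments than we
	 * can describe, collapse the chain and retry the load once.
	 */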
4224 map = sc->bge_cdata.bge_tx_dmamap[idx];
4225 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4226 &nsegs, BUS_DMA_NOWAIT);
4227 if (error == EFBIG) {
4228 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4229 if (m == NULL) {
4230 m_freem(*m_head);
4231 *m_head = NULL;
4232 return (ENOBUFS);
4233 }
4234 *m_head = m;
4235 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4236 m, segs, &nsegs, BUS_DMA_NOWAIT);
4237 if (error) {
4238 m_freem(m);
4239 *m_head = NULL;
4240 return (error);
4241 }
4242 } else if (error != 0)
4243 return (error);
4244
4245 /* Check if we have enough free send BDs. */
4246 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4247 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4248 return (ENOBUFS);
4249 }
4250
4251 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4252
4253#if __FreeBSD_version > 700022
4254 if (m->m_flags & M_VLANTAG) {
4255 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4256 vlan_tag = m->m_pkthdr.ether_vtag;
4257 }
4258#else
4259 {
4260 struct m_tag *mtag;
4261
4262 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
4263 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4264 vlan_tag = VLAN_TAG_VALUE(mtag);
4265 }
4266 }
4267#endif
4268 for (i = 0; ; i++) {
4269 d = &sc->bge_ldata.bge_tx_ring[idx];
4270 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4271 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4272 d->bge_len = segs[i].ds_len;
4273 d->bge_flags = csum_flags;
4274 d->bge_vlan_tag = vlan_tag;
4275 d->bge_mss = mss;
4276 if (i == nsegs - 1)
4277 break;
4278 BGE_INC(idx, BGE_TX_RING_CNT);
4279 }
4280
4281 /* Mark the last segment as end of packet... */
4282 d->bge_flags |= BGE_TXBDFLAG_END;
4283
4284 /*
4285	 * Ensure that the map for this transmission
4286 * is placed at the array index of the last descriptor
4287 * in this chain.
4288 */
4289 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4290 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4291 sc->bge_cdata.bge_tx_chain[idx] = m;
4292 sc->bge_txcnt += nsegs;
4293
4294 BGE_INC(idx, BGE_TX_RING_CNT);
4295 *txidx = idx;
4296
4297 return (0);
4298}
4299
4300/*
4301 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4302 * to the mbuf data regions directly in the transmit descriptors.
4303 */
4304static void
4305bge_start_locked(struct ifnet *ifp)
4306{
4307 struct bge_softc *sc;
4308 struct mbuf *m_head;
4309 uint32_t prodidx;
4310 int count;
4311
4312 sc = ifp->if_softc;
4313 BGE_LOCK_ASSERT(sc);
4314
4315 if (!sc->bge_link ||
4316 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4317 IFF_DRV_RUNNING)
4318 return;
4319
4320 prodidx = sc->bge_tx_prodidx;
4321
4322 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
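		/*
		 * Stop filling the ring when fewer than 16 descriptors
		 * remain free; bge_encap() rechecks the exact count for
		 * each packet it maps.
		 */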
4323 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4324 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4325 break;
4326 }
4327 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4328 if (m_head == NULL)
4329 break;
4330
4331 /*
4332 * XXX
4333 * The code inside the if() block is never reached since we
4334 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4335 * requests to checksum TCP/UDP in a fragmented packet.
4336 *
4337 * XXX
4338 * safety overkill. If this is a fragmented packet chain
4339 * with delayed TCP/UDP checksums, then only encapsulate
4340 * it if we have enough descriptors to handle the entire
4341 * chain at once.
4342 * (paranoia -- may not actually be needed)
4343 */
4344 if (m_head->m_flags & M_FIRSTFRAG &&
4345 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4346 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4347 m_head->m_pkthdr.csum_data + 16) {
4348 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4349 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4350 break;
4351 }
4352 }
4353
4354 /*
4355 * Pack the data into the transmit ring. If we
4356 * don't have room, set the OACTIVE flag and wait
4357 * for the NIC to drain the ring.
4358 */
4359 if (bge_encap(sc, &m_head, &prodidx)) {
4360 if (m_head == NULL)
4361 break;
4362 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4363 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4364 break;
4365 }
4366 ++count;
4367
4368 /*
4369 * If there's a BPF listener, bounce a copy of this frame
4370 * to him.
4371 */
4372#ifdef ETHER_BPF_MTAP
4373 ETHER_BPF_MTAP(ifp, m_head);
4374#else
4375 BPF_MTAP(ifp, m_head);
4376#endif
4377 }
4378
4379 if (count > 0) {
4380 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4381 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4382 /* Transmit. */
4383 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4384 /* 5700 b2 errata */
4385 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4386 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4387
4388 sc->bge_tx_prodidx = prodidx;
4389
4390 /*
4391 * Set a timeout in case the chip goes out to lunch.
4392 */
4393 sc->bge_timer = 5;
4394 }
4395}
4396
4397/*
4398 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4399 * to the mbuf data regions directly in the transmit descriptors.
4400 */
4401static void
4402bge_start(struct ifnet *ifp)
4403{
4404 struct bge_softc *sc;
4405
4406 sc = ifp->if_softc;
4407 BGE_LOCK(sc);
4408 bge_start_locked(ifp);
4409 BGE_UNLOCK(sc);
4410}
4411
4412static void
4413bge_init_locked(struct bge_softc *sc)
4414{
4415 struct ifnet *ifp;
4416 uint16_t *m;
4417 uint32_t mode;
4418
4419 BGE_LOCK_ASSERT(sc);
4420
4421 ifp = sc->bge_ifp;
4422
4423 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4424 return;
4425
4426 /* Cancel pending I/O and flush buffers. */
4427 bge_stop(sc);
4428
4429 bge_stop_fw(sc);
4430 bge_sig_pre_reset(sc, BGE_RESET_START);
4431 bge_reset(sc);
4432 bge_sig_legacy(sc, BGE_RESET_START);
4433 bge_sig_post_reset(sc, BGE_RESET_START);
4434
4435 bge_chipinit(sc);
4436
4437 /*
4438 * Init the various state machines, ring
4439 * control blocks and firmware.
4440 */
4441 if (bge_blockinit(sc)) {
4442 device_printf(sc->bge_dev, "initialization failure\n");
4443 return;
4444 }
4445
4446 ifp = sc->bge_ifp;
4447
4448 /* Specify MTU. */
4449 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4450 ETHER_HDR_LEN + ETHER_CRC_LEN +
4451 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4452
4453 /* Load our MAC address. */
4454 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4455 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4456 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4457
4458 /* Program promiscuous mode. */
4459 bge_setpromisc(sc);
4460
4461 /* Program multicast filter. */
4462 bge_setmulti(sc);
4463
4464 /* Program VLAN tag stripping. */
4465 bge_setvlan(sc);
4466
4467 /* Override UDP checksum offloading. */
4468 if (sc->bge_forced_udpcsum == 0)
4469 sc->bge_csum_features &= ~CSUM_UDP;
4470 else
4471 sc->bge_csum_features |= CSUM_UDP;
4472 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4473 ifp->if_capenable & IFCAP_TXCSUM) {
4474 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4475 ifp->if_hwassist |= sc->bge_csum_features;
4476 }
4477
4478 /* Init RX ring. */
4479 if (bge_init_rx_ring_std(sc) != 0) {
4480 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4481 bge_stop(sc);
4482 return;
4483 }
4484
4485 /*
4486 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4487	 * memory to ensure that the chip has in fact read the first
4488 * entry of the ring.
4489 */
4490 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4491 uint32_t v, i;
4492 for (i = 0; i < 10; i++) {
4493 DELAY(20);
4494 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4495 if (v == (MCLBYTES - ETHER_ALIGN))
4496 break;
4497 }
4498 if (i == 10)
4499 device_printf (sc->bge_dev,
4500 "5705 A0 chip failed to load RX ring\n");
4501 }
4502
4503 /* Init jumbo RX ring. */
4504 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4505 (MCLBYTES - ETHER_ALIGN)) {
4506 if (bge_init_rx_ring_jumbo(sc) != 0) {
4507 device_printf(sc->bge_dev,
4508 "no memory for jumbo Rx buffers.\n");
4509 bge_stop(sc);
4510 return;
4511 }
4512 }
4513
4514 /* Init our RX return ring index. */
4515 sc->bge_rx_saved_considx = 0;
4516
4517 /* Init our RX/TX stat counters. */
4518 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4519
4520 /* Init TX ring. */
4521 bge_init_tx_ring(sc);
4522
4523 /* Enable TX MAC state machine lockup fix. */
4524 mode = CSR_READ_4(sc, BGE_TX_MODE);
4525 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4526 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4527 /* Turn on transmitter. */
4528 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4529
4530 /* Turn on receiver. */
4531 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4532
4533 /*
4534 * Set the number of good frames to receive after RX MBUF
4535 * Low Watermark has been reached. After the RX MAC receives
4536 * this number of frames, it will drop subsequent incoming
4537 * frames until the MBUF High Watermark is reached.
4538 */
4539 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4540
4541 /* Clear MAC statistics. */
4542 if (BGE_IS_5705_PLUS(sc))
4543 bge_stats_clear_regs(sc);
4544
4545 /* Tell firmware we're alive. */
4546 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4547
4548#ifdef DEVICE_POLLING
4549 /* Disable interrupts if we are polling. */
4550 if (ifp->if_capenable & IFCAP_POLLING) {
4551 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4552 BGE_PCIMISCCTL_MASK_PCI_INTR);
4553 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4554 } else
4555#endif
4556
4557 /* Enable host interrupts. */
4558 {
4559 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4560 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4561 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4562 }
4563
4564 bge_ifmedia_upd_locked(ifp);
4565
4566 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4567 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4568
4569 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4570}
4571
4572static void
4573bge_init(void *xsc)
4574{
4575 struct bge_softc *sc = xsc;
4576
4577 BGE_LOCK(sc);
4578 bge_init_locked(sc);
4579 BGE_UNLOCK(sc);
4580}
4581
4582/*
4583 * Set media options.
4584 */
4585static int
4586bge_ifmedia_upd(struct ifnet *ifp)
4587{
4588 struct bge_softc *sc = ifp->if_softc;
4589 int res;
4590
4591 BGE_LOCK(sc);
4592 res = bge_ifmedia_upd_locked(ifp);
4593 BGE_UNLOCK(sc);
4594
4595 return (res);
4596}
4597
4598static int
4599bge_ifmedia_upd_locked(struct ifnet *ifp)
4600{
4601 struct bge_softc *sc = ifp->if_softc;
4602 struct mii_data *mii;
4603 struct mii_softc *miisc;
4604 struct ifmedia *ifm;
4605
4606 BGE_LOCK_ASSERT(sc);
4607
4608 ifm = &sc->bge_ifmedia;
4609
4610 /* If this is a 1000baseX NIC, enable the TBI port. */
4611 if (sc->bge_flags & BGE_FLAG_TBI) {
4612 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4613 return (EINVAL);
4614 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4615 case IFM_AUTO:
4616 /*
4617 * The BCM5704 ASIC appears to have a special
4618 * mechanism for programming the autoneg
4619 * advertisement registers in TBI mode.
4620 */
4621 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4622 uint32_t sgdig;
4623 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4624 if (sgdig & BGE_SGDIGSTS_DONE) {
4625 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4626 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4627 sgdig |= BGE_SGDIGCFG_AUTO |
4628 BGE_SGDIGCFG_PAUSE_CAP |
4629 BGE_SGDIGCFG_ASYM_PAUSE;
4630 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4631 sgdig | BGE_SGDIGCFG_SEND);
4632 DELAY(5);
4633 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4634 }
4635 }
4636 break;
4637 case IFM_1000_SX:
4638 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4639 BGE_CLRBIT(sc, BGE_MAC_MODE,
4640 BGE_MACMODE_HALF_DUPLEX);
4641 } else {
4642 BGE_SETBIT(sc, BGE_MAC_MODE,
4643 BGE_MACMODE_HALF_DUPLEX);
4644 }
4645 break;
4646 default:
4647 return (EINVAL);
4648 }
4649 return (0);
4650 }
4651
4652 sc->bge_link_evt++;
4653 mii = device_get_softc(sc->bge_miibus);
4654 if (mii->mii_instance)
4655 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4656 mii_phy_reset(miisc);
4657 mii_mediachg(mii);
4658
4659	/*
4660	 * Force an interrupt so that we will call bge_link_upd
4661	 * if needed and clear any pending link state attention.
4662	 * Without this we would not get any further interrupts
4663	 * for link state changes, would never bring the link up
4664	 * and would not be able to send in bge_start_locked. The
4665	 * only way to get things working was to receive a packet
4666	 * and get an RX intr.
4667	 * bge_tick should help for fiber cards and we might not
4668	 * need to do this here if BGE_FLAG_TBI is set, but since
4669	 * we poll for fiber anyway it should not hurt.
4670	 */
4671 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4672 sc->bge_flags & BGE_FLAG_5788)
4673 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4674 else
4675 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4676
4677 return (0);
4678}
4679
4680/*
4681 * Report current media status.
4682 */
4683static void
4684bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4685{
4686 struct bge_softc *sc = ifp->if_softc;
4687 struct mii_data *mii;
4688
4689 BGE_LOCK(sc);
4690
4691 if (sc->bge_flags & BGE_FLAG_TBI) {
4692 ifmr->ifm_status = IFM_AVALID;
4693 ifmr->ifm_active = IFM_ETHER;
4694 if (CSR_READ_4(sc, BGE_MAC_STS) &
4695 BGE_MACSTAT_TBI_PCS_SYNCHED)
4696 ifmr->ifm_status |= IFM_ACTIVE;
4697 else {
4698 ifmr->ifm_active |= IFM_NONE;
4699 BGE_UNLOCK(sc);
4700 return;
4701 }
4702 ifmr->ifm_active |= IFM_1000_SX;
4703 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4704 ifmr->ifm_active |= IFM_HDX;
4705 else
4706 ifmr->ifm_active |= IFM_FDX;
4707 BGE_UNLOCK(sc);
4708 return;
4709 }
4710
4711 mii = device_get_softc(sc->bge_miibus);
4712 mii_pollstat(mii);
4713 ifmr->ifm_active = mii->mii_media_active;
4714 ifmr->ifm_status = mii->mii_media_status;
4715
4716 BGE_UNLOCK(sc);
4717}
4718
4719static int
4720bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4721{
4722 struct bge_softc *sc = ifp->if_softc;
4723 struct ifreq *ifr = (struct ifreq *) data;
4724 struct mii_data *mii;
4725 int flags, mask, error = 0;
4726
4727 switch (command) {
4728 case SIOCSIFMTU:
4729 BGE_LOCK(sc);
4730 if (ifr->ifr_mtu < ETHERMIN ||
4731 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4732 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4733 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4734 ifr->ifr_mtu > ETHERMTU))
4735 error = EINVAL;
4736 else if (ifp->if_mtu != ifr->ifr_mtu) {
4737 ifp->if_mtu = ifr->ifr_mtu;
4738 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4739 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4740 bge_init_locked(sc);
4741 }
4742 }
4743 BGE_UNLOCK(sc);
4744 break;
4745 case SIOCSIFFLAGS:
4746 BGE_LOCK(sc);
4747 if (ifp->if_flags & IFF_UP) {
4748 /*
4749 * If only the state of the PROMISC flag changed,
4750 * then just use the 'set promisc mode' command
4751 * instead of reinitializing the entire NIC. Doing
4752 * a full re-init means reloading the firmware and
4753 * waiting for it to start up, which may take a
4754 * second or two. Similarly for ALLMULTI.
4755 */
4756 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4757 flags = ifp->if_flags ^ sc->bge_if_flags;
4758 if (flags & IFF_PROMISC)
4759 bge_setpromisc(sc);
4760 if (flags & IFF_ALLMULTI)
4761 bge_setmulti(sc);
4762 } else
4763 bge_init_locked(sc);
4764 } else {
4765 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4766 bge_stop(sc);
4767 }
4768 }
4769 sc->bge_if_flags = ifp->if_flags;
4770 BGE_UNLOCK(sc);
4771 error = 0;
4772 break;
4773 case SIOCADDMULTI:
4774 case SIOCDELMULTI:
4775 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4776 BGE_LOCK(sc);
4777 bge_setmulti(sc);
4778 BGE_UNLOCK(sc);
4779 error = 0;
4780 }
4781 break;
4782 case SIOCSIFMEDIA:
4783 case SIOCGIFMEDIA:
4784 if (sc->bge_flags & BGE_FLAG_TBI) {
4785 error = ifmedia_ioctl(ifp, ifr,
4786 &sc->bge_ifmedia, command);
4787 } else {
4788 mii = device_get_softc(sc->bge_miibus);
4789 error = ifmedia_ioctl(ifp, ifr,
4790 &mii->mii_media, command);
4791 }
4792 break;
4793 case SIOCSIFCAP:
4794 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4795#ifdef DEVICE_POLLING
4796 if (mask & IFCAP_POLLING) {
4797 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4798 error = ether_poll_register(bge_poll, ifp);
4799 if (error)
4800 return (error);
4801 BGE_LOCK(sc);
4802 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4803 BGE_PCIMISCCTL_MASK_PCI_INTR);
4804 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4805 ifp->if_capenable |= IFCAP_POLLING;
4806 BGE_UNLOCK(sc);
4807 } else {
4808 error = ether_poll_deregister(ifp);
4809 /* Enable interrupt even in error case */
4810 BGE_LOCK(sc);
4811 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
4812 BGE_PCIMISCCTL_MASK_PCI_INTR);
4813 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4814 ifp->if_capenable &= ~IFCAP_POLLING;
4815 BGE_UNLOCK(sc);
4816 }
4817 }
4818#endif
4819 if ((mask & IFCAP_TXCSUM) != 0 &&
4820 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
4821 ifp->if_capenable ^= IFCAP_TXCSUM;
4822 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
4823 ifp->if_hwassist |= sc->bge_csum_features;
4824 else
4825 ifp->if_hwassist &= ~sc->bge_csum_features;
4826 }
4827
4828 if ((mask & IFCAP_RXCSUM) != 0 &&
4829 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
4830 ifp->if_capenable ^= IFCAP_RXCSUM;
4831
4832 if ((mask & IFCAP_TSO4) != 0 &&
4833 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
4834 ifp->if_capenable ^= IFCAP_TSO4;
4835 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
4836 ifp->if_hwassist |= CSUM_TSO;
4837 else
4838 ifp->if_hwassist &= ~CSUM_TSO;
4839 }
4840
4841 if (mask & IFCAP_VLAN_MTU) {
4842 ifp->if_capenable ^= IFCAP_VLAN_MTU;
4843 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4844 bge_init(sc);
4845 }
4846
4847 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
4848 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
4849 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4850 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
4851 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
4852 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4853 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
4854 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
4855 BGE_LOCK(sc);
4856 bge_setvlan(sc);
4857 BGE_UNLOCK(sc);
4858 }
4859#ifdef VLAN_CAPABILITIES
4860 VLAN_CAPABILITIES(ifp);
4861#endif
4862 break;
4863 default:
4864 error = ether_ioctl(ifp, command, data);
4865 break;
4866 }
4867
4868 return (error);
4869}
4870
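/*
 * Watchdog handler. If the per-device timer has expired, log a
 * watchdog timeout and reinitialize the chip.
 */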
4871static void
4872bge_watchdog(struct bge_softc *sc)
4873{
4874 struct ifnet *ifp;
4875
4876 BGE_LOCK_ASSERT(sc);
4877
4878 if (sc->bge_timer == 0 || --sc->bge_timer)
4879 return;
4880
4881 ifp = sc->bge_ifp;
4882
4883 if_printf(ifp, "watchdog timeout -- resetting\n");
4884
4885 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4886 bge_init_locked(sc);
4887
4888 ifp->if_oerrors++;
4889}
4890
4891/*
4892 * Stop the adapter and free any mbufs allocated to the
4893 * RX and TX lists.
4894 */
4895static void
4896bge_stop(struct bge_softc *sc)
4897{
4898 struct ifnet *ifp;
4899
4900 BGE_LOCK_ASSERT(sc);
4901
4902 ifp = sc->bge_ifp;
4903
4904 callout_stop(&sc->bge_stat_ch);
4905
4906 /* Disable host interrupts. */
4907 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4908 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4909
4910 /*
4911 * Tell firmware we're shutting down.
4912 */
4913 bge_stop_fw(sc);
4914 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4915
4916 /*
4917 * Disable all of the receiver blocks.
4918 */
4919 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4920 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4921 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4922 if (!(BGE_IS_5705_PLUS(sc)))
4923 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4924 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4925 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4926 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4927
4928 /*
4929 * Disable all of the transmit blocks.
4930 */
4931 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4932 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4933 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4934 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4935 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4936 if (!(BGE_IS_5705_PLUS(sc)))
4937 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4938 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4939
4940 /*
4941 * Shut down all of the memory managers and related
4942 * state machines.
4943 */
4944 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4945 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4946 if (!(BGE_IS_5705_PLUS(sc)))
4947 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4948 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4949 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4950 if (!(BGE_IS_5705_PLUS(sc))) {
4951 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4952 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4953 }
4954 /* Update MAC statistics. */
4955 if (BGE_IS_5705_PLUS(sc))
4956 bge_stats_update_regs(sc);
4957
4958 bge_reset(sc);
4959 bge_sig_legacy(sc, BGE_RESET_STOP);
4960 bge_sig_post_reset(sc, BGE_RESET_STOP);
4961
4962 /*
4963	 * Keep the ASF firmware running if it is up.
4964 */
4965 if (sc->bge_asf_mode & ASF_STACKUP)
4966 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4967 else
4968 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4969
4970 /* Free the RX lists. */
4971 bge_free_rx_ring_std(sc);
4972
4973 /* Free jumbo RX list. */
4974 if (BGE_IS_JUMBO_CAPABLE(sc))
4975 bge_free_rx_ring_jumbo(sc);
4976
4977 /* Free TX buffers. */
4978 bge_free_tx_ring(sc);
4979
4980 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4981
4982 /* Clear MAC's link state (PHY may still have link UP). */
4983 if (bootverbose && sc->bge_link)
4984 if_printf(sc->bge_ifp, "link DOWN\n");
4985 sc->bge_link = 0;
4986
4987 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4988}
4989
4990/*
4991 * Stop all chip I/O so that the kernel's probe routines don't
4992 * get confused by errant DMAs when rebooting.
4993 */
4994static int
4995bge_shutdown(device_t dev)
4996{
4997 struct bge_softc *sc;
4998
4999 sc = device_get_softc(dev);
5000 BGE_LOCK(sc);
5001 bge_stop(sc);
5002 bge_reset(sc);
5003 BGE_UNLOCK(sc);
5004
5005 return (0);
5006}
5007
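/*
 * Device suspend routine. Stop the interface before the system
 * enters a sleep state.
 */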
5008static int
5009bge_suspend(device_t dev)
5010{
5011 struct bge_softc *sc;
5012
5013 sc = device_get_softc(dev);
5014 BGE_LOCK(sc);
5015 bge_stop(sc);
5016 BGE_UNLOCK(sc);
5017
5018 return (0);
5019}
5020
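/*
 * Device resume routine. Reinitialize the interface if it was up
 * and restart any pending output.
 */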
5021static int
5022bge_resume(device_t dev)
5023{
5024 struct bge_softc *sc;
5025 struct ifnet *ifp;
5026
5027 sc = device_get_softc(dev);
5028 BGE_LOCK(sc);
5029 ifp = sc->bge_ifp;
5030 if (ifp->if_flags & IFF_UP) {
5031 bge_init_locked(sc);
5032 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5033 bge_start_locked(ifp);
5034 }
5035 BGE_UNLOCK(sc);
5036
5037 return (0);
5038}
5039
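/*
 * Handle link state change attentions. Depending on the controller
 * and PHY type (BCM5700 MII interrupts, TBI fiber or MII auto-polling)
 * determine the current link state, update sc->bge_link and clear the
 * pending attention bits.
 */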
5040static void
5041bge_link_upd(struct bge_softc *sc)
5042{
5043 struct mii_data *mii;
5044 uint32_t link, status;
5045
5046 BGE_LOCK_ASSERT(sc);
5047
5048 /* Clear 'pending link event' flag. */
5049 sc->bge_link_evt = 0;
5050
5051 /*
5052 * Process link state changes.
5053 * Grrr. The link status word in the status block does
5054 * not work correctly on the BCM5700 rev AX and BX chips,
5055 * according to all available information. Hence, we have
5056 * to enable MII interrupts in order to properly obtain
5057 * async link changes. Unfortunately, this also means that
5058 * we have to read the MAC status register to detect link
5059 * changes, thereby adding an additional register access to
5060 * the interrupt handler.
5061 *
5062	 * XXX: perhaps the link state detection procedure used for
5063	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5064 */
5065
5066 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5067 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5068 status = CSR_READ_4(sc, BGE_MAC_STS);
5069 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5070 mii = device_get_softc(sc->bge_miibus);
5071 mii_pollstat(mii);
5072 if (!sc->bge_link &&
5073 mii->mii_media_status & IFM_ACTIVE &&
5074 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5075 sc->bge_link++;
5076 if (bootverbose)
5077 if_printf(sc->bge_ifp, "link UP\n");
5078 } else if (sc->bge_link &&
5079 (!(mii->mii_media_status & IFM_ACTIVE) ||
5080 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5081 sc->bge_link = 0;
5082 if (bootverbose)
5083 if_printf(sc->bge_ifp, "link DOWN\n");
5084 }
5085
5086 /* Clear the interrupt. */
5087 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5088 BGE_EVTENB_MI_INTERRUPT);
5089 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5090 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5091 BRGPHY_INTRS);
5092 }
5093 return;
5094 }
5095
5096 if (sc->bge_flags & BGE_FLAG_TBI) {
5097 status = CSR_READ_4(sc, BGE_MAC_STS);
5098 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5099 if (!sc->bge_link) {
5100 sc->bge_link++;
5101 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5102 BGE_CLRBIT(sc, BGE_MAC_MODE,
5103 BGE_MACMODE_TBI_SEND_CFGS);
5104 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5105 if (bootverbose)
5106 if_printf(sc->bge_ifp, "link UP\n");
5107 if_link_state_change(sc->bge_ifp,
5108 LINK_STATE_UP);
5109 }
5110 } else if (sc->bge_link) {
5111 sc->bge_link = 0;
5112 if (bootverbose)
5113 if_printf(sc->bge_ifp, "link DOWN\n");
5114 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5115 }
5116 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5117 /*
5118		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
5119		 * bit in the status word always set. Work around this bug by
5120		 * reading the PHY link status directly.
5121 */
5122 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5123
5124 if (link != sc->bge_link ||
5125 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5126 mii = device_get_softc(sc->bge_miibus);
5127 mii_pollstat(mii);
5128 if (!sc->bge_link &&
5129 mii->mii_media_status & IFM_ACTIVE &&
5130 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5131 sc->bge_link++;
5132 if (bootverbose)
5133 if_printf(sc->bge_ifp, "link UP\n");
5134 } else if (sc->bge_link &&
5135 (!(mii->mii_media_status & IFM_ACTIVE) ||
5136 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5137 sc->bge_link = 0;
5138 if (bootverbose)
5139 if_printf(sc->bge_ifp, "link DOWN\n");
5140 }
5141 }
5142 } else {
5143 /*
5144		 * For controllers for which we call mii_tick, we have to
5145		 * poll the link status here.
5146 */
5147 mii = device_get_softc(sc->bge_miibus);
5148 mii_pollstat(mii);
5149 bge_miibus_statchg(sc->bge_dev);
5150 }
5151
5152 /* Clear the attention. */
5153 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5154 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5155 BGE_MACSTAT_LINK_CHANGED);
5156}
5157
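/*
 * Create the driver's sysctl tree: optional register debugging hooks,
 * tunables for forced TX buffer collapsing and UDP checksum offloading,
 * and the per-device statistics nodes.
 */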
5158static void
5159bge_add_sysctls(struct bge_softc *sc)
5160{
5161 struct sysctl_ctx_list *ctx;
5162 struct sysctl_oid_list *children;
5163 char tn[32];
5164 int unit;
5165
5166 ctx = device_get_sysctl_ctx(sc->bge_dev);
5167 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5168
5169#ifdef BGE_REGISTER_DEBUG
5170 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5171 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5172 "Debug Information");
5173
5174 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5175 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5176 "Register Read");
5177
5178 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5179 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5180 "Memory Read");
5181
5182#endif
5183
5184 unit = device_get_unit(sc->bge_dev);
5185 /*
5186 * A common design characteristic for many Broadcom client controllers
5187 * is that they only support a single outstanding DMA read operation
5188 * on the PCIe bus. This means that it will take twice as long to fetch
5189 * a TX frame that is split into header and payload buffers as it does
5190 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5191 * these controllers, coalescing buffers to reduce the number of memory
5192	 * reads is an effective way to get maximum performance (about 940Mbps).
5193	 * Without collapsing TX buffers the maximum TCP bulk transfer
5194	 * performance is about 850Mbps. However, forcing the coalescing of
5195	 * mbufs consumes a lot of CPU cycles, so leave it off by default.
5196 */
5197 sc->bge_forced_collapse = 0;
5198 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5199 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5200 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5201 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5202 "Number of fragmented TX buffers of a frame allowed before "
5203 "forced collapsing");
5204
5205 /*
5206 * It seems all Broadcom controllers have a bug that can generate UDP
5207 * datagrams with checksum value 0 when TX UDP checksum offloading is
5208	 * enabled. Generating a UDP checksum value of 0 violates RFC 768.
5209	 * Even though the probability of generating such UDP datagrams is
5210	 * low, we do not want FreeBSD boxes injecting such datagrams into
5211	 * the network, so disable UDP checksum offloading by default. Users
5212	 * can still override this behavior by setting a sysctl variable,
5213 * dev.bge.0.forced_udpcsum.
5214 */
5215 sc->bge_forced_udpcsum = 0;
5216	snprintf(tn, sizeof(tn), "dev.bge.%d.forced_udpcsum", unit);
5217 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5218 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5219 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5220 "Enable UDP checksum offloading even if controller can "
5221 "generate UDP checksum value 0");
5222
5223 if (BGE_IS_5705_PLUS(sc))
5224 bge_add_sysctl_stats_regs(sc, ctx, children);
5225 else
5226 bge_add_sysctl_stats(sc, ctx, children);
5227}
5228
5229#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5230 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5231 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5232 desc)
5233
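/*
 * Export the counters kept in the controller's statistics block as
 * read-only sysctl nodes (controllers that maintain statistics in
 * NIC memory).
 */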
5234static void
5235bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5236 struct sysctl_oid_list *parent)
5237{
5238 struct sysctl_oid *tree;
5239 struct sysctl_oid_list *children, *schildren;
5240
5241 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5242 NULL, "BGE Statistics");
5243 schildren = children = SYSCTL_CHILDREN(tree);
5244 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5245 children, COSFramesDroppedDueToFilters,
5246 "FramesDroppedDueToFilters");
5247 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5248 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5249 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5250 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5251 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5252 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5253 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5254 children, ifInDiscards, "InputDiscards");
5255 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5256 children, ifInErrors, "InputErrors");
5257 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5258 children, nicRecvThresholdHit, "RecvThresholdHit");
5259 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5260 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5261 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5262 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5263 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5264 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5265 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5266 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5267 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5268 children, nicRingStatusUpdate, "RingStatusUpdate");
5269 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5270 children, nicInterrupts, "Interrupts");
5271 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5272 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5273 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5274 children, nicSendThresholdHit, "SendThresholdHit");
5275
5276 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5277 NULL, "BGE RX Statistics");
5278 children = SYSCTL_CHILDREN(tree);
5279 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5280 children, rxstats.ifHCInOctets, "ifHCInOctets");
5281 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5282 children, rxstats.etherStatsFragments, "Fragments");
5283 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5284 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5285 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5286 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5287 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5288 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5289 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5290 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5291 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5292 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5293 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5294 children, rxstats.xoffPauseFramesReceived,
5295 "xoffPauseFramesReceived");
5296 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5297 children, rxstats.macControlFramesReceived,
5298 "ControlFramesReceived");
5299 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5300 children, rxstats.xoffStateEntered, "xoffStateEntered");
5301 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5302 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5303 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5304 children, rxstats.etherStatsJabbers, "Jabbers");
5305 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5306 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5307 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5308 children, rxstats.inRangeLengthError, "inRangeLengthError");
5309 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5310 children, rxstats.outRangeLengthError, "outRangeLengthError");
5311
5312 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5313 NULL, "BGE TX Statistics");
5314 children = SYSCTL_CHILDREN(tree);
5315 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5316 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5317 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5318 children, txstats.etherStatsCollisions, "Collisions");
5319 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5320 children, txstats.outXonSent, "XonSent");
5321 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5322 children, txstats.outXoffSent, "XoffSent");
5323 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5324 children, txstats.flowControlDone, "flowControlDone");
5325 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5326 children, txstats.dot3StatsInternalMacTransmitErrors,
5327 "InternalMacTransmitErrors");
5328 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5329 children, txstats.dot3StatsSingleCollisionFrames,
5330 "SingleCollisionFrames");
5331 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5332 children, txstats.dot3StatsMultipleCollisionFrames,
5333 "MultipleCollisionFrames");
5334 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5335 children, txstats.dot3StatsDeferredTransmissions,
5336 "DeferredTransmissions");
5337 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5338 children, txstats.dot3StatsExcessiveCollisions,
5339 "ExcessiveCollisions");
5340 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5341 children, txstats.dot3StatsLateCollisions,
5342 "LateCollisions");
5343 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5344 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5345 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5346 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5347 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5348 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5349 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5350 children, txstats.dot3StatsCarrierSenseErrors,
5351 "CarrierSenseErrors");
5352 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5353 children, txstats.ifOutDiscards, "Discards");
5354 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5355 children, txstats.ifOutErrors, "Errors");
5356}
5357
5358#undef BGE_SYSCTL_STAT
5359
5360#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5361 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5362
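/*
 * Export the driver-maintained MAC statistics (bge_mac_stats) as
 * read-only sysctl nodes for controllers that keep statistics in
 * registers.
 */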
5363static void
5364bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5365 struct sysctl_oid_list *parent)
5366{
5367 struct sysctl_oid *tree;
5368 struct sysctl_oid_list *child, *schild;
5369 struct bge_mac_stats *stats;
5370
5371 stats = &sc->bge_mac_stats;
5372 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5373 NULL, "BGE Statistics");
5374 schild = child = SYSCTL_CHILDREN(tree);
5375 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5376 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5377 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5378 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5379 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5380 &stats->DmaWriteHighPriQueueFull,
5381 "NIC DMA Write High Priority Queue Full");
5382 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5383 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5384 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5385 &stats->InputDiscards, "Discarded Input Frames");
5386 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5387 &stats->InputErrors, "Input Errors");
5388 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5389 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5390
5391 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5392 NULL, "BGE RX Statistics");
5393 child = SYSCTL_CHILDREN(tree);
5394 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5395 &stats->ifHCInOctets, "Inbound Octets");
5396 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5397 &stats->etherStatsFragments, "Fragments");
5398 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5399 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5400 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5401 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5402 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5403 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5404 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5405 &stats->dot3StatsFCSErrors, "FCS Errors");
5406 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5407 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5408 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5409 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5410 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5411 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5412 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5413 &stats->macControlFramesReceived, "MAC Control Frames Received");
5414 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5415 &stats->xoffStateEntered, "XOFF State Entered");
5416 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5417 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5418 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5419 &stats->etherStatsJabbers, "Jabbers");
5420 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5421 &stats->etherStatsUndersizePkts, "Undersized Packets");
5422
5423 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5424 NULL, "BGE TX Statistics");
5425 child = SYSCTL_CHILDREN(tree);
5426 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5427 &stats->ifHCOutOctets, "Outbound Octets");
5428 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5429 &stats->etherStatsCollisions, "TX Collisions");
5430 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5431 &stats->outXonSent, "XON Sent");
5432 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5433 &stats->outXoffSent, "XOFF Sent");
5434 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5435 &stats->dot3StatsInternalMacTransmitErrors,
5436 "Internal MAC TX Errors");
5437 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5438 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5439 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5440 &stats->dot3StatsMultipleCollisionFrames,
5441 "Multiple Collision Frames");
5442 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5443 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5444 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5445 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5446 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5447 &stats->dot3StatsLateCollisions, "Late Collisions");
5448 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5449 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5450 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5451 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5452 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5453 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5454}
5455
5456#undef BGE_SYSCTL_STAT_ADD64
5457
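/*
 * Sysctl handler that returns a single counter read from the
 * statistics block through the memory window.
 */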
5458static int
5459bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5460{
5461 struct bge_softc *sc;
5462 uint32_t result;
5463 int offset;
5464
5465 sc = (struct bge_softc *)arg1;
5466 offset = arg2;
5467 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5468 offsetof(bge_hostaddr, bge_addr_lo));
5469 return (sysctl_handle_int(oidp, &result, 0, req));
5470}
5471
5472#ifdef BGE_REGISTER_DEBUG
5473static int
5474bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5475{
5476 struct bge_softc *sc;
5477 uint16_t *sbdata;
5478 int error;
5479 int result;
5480 int i, j;
5481
5482 result = -1;
5483 error = sysctl_handle_int(oidp, &result, 0, req);
5484 if (error || (req->newptr == NULL))
5485 return (error);
5486
5487 if (result == 1) {
5488 sc = (struct bge_softc *)arg1;
5489
5490 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5491 printf("Status Block:\n");
5492 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
5493 printf("%06x:", i);
5494 for (j = 0; j < 8; j++) {
5495 printf(" %04x", sbdata[i]);
5496 i += 4;
5497 }
5498 printf("\n");
5499 }
5500
5501 printf("Registers:\n");
5502 for (i = 0x800; i < 0xA00; ) {
5503 printf("%06x:", i);
5504 for (j = 0; j < 8; j++) {
5505 printf(" %08x", CSR_READ_4(sc, i));
5506 i += 4;
5507 }
5508 printf("\n");
5509 }
5510
5511 printf("Hardware Flags:\n");
5512 if (BGE_IS_5755_PLUS(sc))
5513 printf(" - 5755 Plus\n");
5514 if (BGE_IS_575X_PLUS(sc))
5515 printf(" - 575X Plus\n");
5516 if (BGE_IS_5705_PLUS(sc))
5517 printf(" - 5705 Plus\n");
5518 if (BGE_IS_5714_FAMILY(sc))
5519 printf(" - 5714 Family\n");
5520 if (BGE_IS_5700_FAMILY(sc))
5521 printf(" - 5700 Family\n");
5522 if (sc->bge_flags & BGE_FLAG_JUMBO)
5523 printf(" - Supports Jumbo Frames\n");
5524 if (sc->bge_flags & BGE_FLAG_PCIX)
5525 printf(" - PCI-X Bus\n");
5526 if (sc->bge_flags & BGE_FLAG_PCIE)
5527 printf(" - PCI Express Bus\n");
5528 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5529 printf(" - No 3 LEDs\n");
5530 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5531 printf(" - RX Alignment Bug\n");
5532 }
5533
5534 return (error);
5535}
5536
5537static int
5538bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5539{
5540 struct bge_softc *sc;
5541 int error;
5542 uint16_t result;
5543 uint32_t val;
5544
5545 result = -1;
5546 error = sysctl_handle_int(oidp, &result, 0, req);
5547 if (error || (req->newptr == NULL))
5548 return (error);
5549
5550 if (result < 0x8000) {
5551 sc = (struct bge_softc *)arg1;
5552 val = CSR_READ_4(sc, result);
5553 printf("reg 0x%06X = 0x%08X\n", result, val);
5554 }
5555
5556 return (error);
5557}
5558
5559static int
5560bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5561{
5562 struct bge_softc *sc;
5563 int error;
5564 uint16_t result;
5565 uint32_t val;
5566
5567 result = -1;
5568 error = sysctl_handle_int(oidp, &result, 0, req);
5569 if (error || (req->newptr == NULL))
5570 return (error);
5571
5572 if (result < 0x8000) {
5573 sc = (struct bge_softc *)arg1;
5574 val = bge_readmem_ind(sc, result);
5575 printf("mem 0x%06X = 0x%08X\n", result, val);
5576 }
5577
5578 return (error);
5579}
5580#endif
5581
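/*
 * Obtain the Ethernet address from the Open Firmware device tree on
 * sparc64. Fails (returns non-zero) when BGE_FLAG_EADDR is set or on
 * other platforms.
 */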
5582static int
5583bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5584{
5585
5586 if (sc->bge_flags & BGE_FLAG_EADDR)
5587 return (1);
5588
5589#ifdef __sparc64__
5590 OF_getetheraddr(sc->bge_dev, ether_addr);
5591 return (0);
5592#endif
5593 return (1);
5594}
5595
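/*
 * Read the Ethernet address from the controller's on-board memory,
 * checking the signature in the upper half of the first word before
 * accepting it.
 */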
5596static int
5597bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5598{
5599 uint32_t mac_addr;
5600
5601 mac_addr = bge_readmem_ind(sc, 0x0c14);
5602 if ((mac_addr >> 16) == 0x484b) {
5603 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5604 ether_addr[1] = (uint8_t)mac_addr;
5605 mac_addr = bge_readmem_ind(sc, 0x0c18);
5606 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5607 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5608 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5609 ether_addr[5] = (uint8_t)mac_addr;
5610 return (0);
5611 }
5612 return (1);
5613}
5614
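/*
 * Read the Ethernet address from NVRAM, using the BCM5906-specific
 * offset where required.
 */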
5615static int
5616bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5617{
5618 int mac_offset = BGE_EE_MAC_OFFSET;
5619
5620 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5621 mac_offset = BGE_EE_MAC_OFFSET_5906;
5622
5623 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5624 ETHER_ADDR_LEN));
5625}
5626
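/*
 * Read the Ethernet address from EEPROM. The BCM5906 is skipped here
 * and handled by the NVRAM method instead.
 */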
5627static int
5628bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5629{
5630
5631 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5632 return (1);
5633
5634 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5635 ETHER_ADDR_LEN));
5636}
5637
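/*
 * Determine the station address by trying each of the lookup
 * methods above in order until one succeeds.
 */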
5638static int
5639bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5640{
5641 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5642 /* NOTE: Order is critical */
5643 bge_get_eaddr_fw,
5644 bge_get_eaddr_mem,
5645 bge_get_eaddr_nvram,
5646 bge_get_eaddr_eeprom,
5647 NULL
5648 };
5649 const bge_eaddr_fcn_t *func;
5650
5651 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5652 if ((*func)(sc, eaddr) == 0)
5653 break;
5654 }
5655 return (*func == NULL ? ENXIO : 0);
5656}