1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 213844 2010-10-14 18:31:40Z yongari $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
218
219 { SK_VENDORID, SK_DEVICEID_ALTIMA },
220
221 { TC_VENDORID, TC_DEVICEID_3C996 },
222
223 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
224 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
225 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
226
227 { 0, 0 }
228};
229
230static const struct bge_vendor {
231 uint16_t v_id;
232 const char *v_name;
233} bge_vendors[] = {
234 { ALTEON_VENDORID, "Alteon" },
235 { ALTIMA_VENDORID, "Altima" },
236 { APPLE_VENDORID, "Apple" },
237 { BCOM_VENDORID, "Broadcom" },
238 { SK_VENDORID, "SysKonnect" },
239 { TC_VENDORID, "3Com" },
240 { FJTSU_VENDORID, "Fujitsu" },
241
242 { 0, NULL }
243};
244
245static const struct bge_revision {
246 uint32_t br_chipid;
247 const char *br_name;
248} bge_revisions[] = {
249 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
250 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
251 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
252 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
253 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
254 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
255 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
256 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
257 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
258 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
259 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
260 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
261 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
262 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
263 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
264 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
265 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
266 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
267 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
268 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
269 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
270 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
271 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
272 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
273 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
274 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
275 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
276 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
277 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
278 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
279 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
280 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
281 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
282 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
283 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
284 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
285 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
286 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
287 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
288 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
289 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
290 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
291 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
292 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
293 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
294 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
295 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
296 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
297 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
298 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
299 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
300 /* 5754 and 5787 share the same ASIC ID */
301 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
302 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
303 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
304 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
305 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
306 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
307 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
308
309 { 0, NULL }
310};
311
312/*
313 * Some defaults for major revisions, so that newer steppings
314 * that we don't know about have a shot at working.
315 */
316static const struct bge_revision bge_majorrevs[] = {
317 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
318 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
319 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
320 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
321 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
322 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
323 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
324 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
325 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
326 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
327 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
328 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
329 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
330 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
331 /* 5754 and 5787 share the same ASIC ID */
332 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
333 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
334 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
335
336 { 0, NULL }
337};
338
339#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
340#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
341#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
342#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
343#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
344#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
345
346const struct bge_revision * bge_lookup_rev(uint32_t);
347const struct bge_vendor * bge_lookup_vendor(uint16_t);
348
349typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
350
351static int bge_probe(device_t);
352static int bge_attach(device_t);
353static int bge_detach(device_t);
354static int bge_suspend(device_t);
355static int bge_resume(device_t);
356static void bge_release_resources(struct bge_softc *);
357static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
358static int bge_dma_alloc(struct bge_softc *);
359static void bge_dma_free(struct bge_softc *);
360static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
361 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
362
363static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
364static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
365static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
366static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
367static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
368
369static void bge_txeof(struct bge_softc *, uint16_t);
370static int bge_rxeof(struct bge_softc *, uint16_t, int);
371
372static void bge_asf_driver_up (struct bge_softc *);
373static void bge_tick(void *);
374static void bge_stats_clear_regs(struct bge_softc *);
375static void bge_stats_update(struct bge_softc *);
376static void bge_stats_update_regs(struct bge_softc *);
377static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
378 uint16_t *);
379static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
380
381static void bge_intr(void *);
382static int bge_msi_intr(void *);
383static void bge_intr_task(void *, int);
384static void bge_start_locked(struct ifnet *);
385static void bge_start(struct ifnet *);
386static int bge_ioctl(struct ifnet *, u_long, caddr_t);
387static void bge_init_locked(struct bge_softc *);
388static void bge_init(void *);
389static void bge_stop(struct bge_softc *);
390static void bge_watchdog(struct bge_softc *);
391static int bge_shutdown(device_t);
392static int bge_ifmedia_upd_locked(struct ifnet *);
393static int bge_ifmedia_upd(struct ifnet *);
394static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
395
396static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
397static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
398
399static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
400static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
401
402static void bge_setpromisc(struct bge_softc *);
403static void bge_setmulti(struct bge_softc *);
404static void bge_setvlan(struct bge_softc *);
405
406static __inline void bge_rxreuse_std(struct bge_softc *, int);
407static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
408static int bge_newbuf_std(struct bge_softc *, int);
409static int bge_newbuf_jumbo(struct bge_softc *, int);
410static int bge_init_rx_ring_std(struct bge_softc *);
411static void bge_free_rx_ring_std(struct bge_softc *);
412static int bge_init_rx_ring_jumbo(struct bge_softc *);
413static void bge_free_rx_ring_jumbo(struct bge_softc *);
414static void bge_free_tx_ring(struct bge_softc *);
415static int bge_init_tx_ring(struct bge_softc *);
416
417static int bge_chipinit(struct bge_softc *);
418static int bge_blockinit(struct bge_softc *);
419
420static int bge_has_eaddr(struct bge_softc *);
421static uint32_t bge_readmem_ind(struct bge_softc *, int);
422static void bge_writemem_ind(struct bge_softc *, int, int);
423static void bge_writembx(struct bge_softc *, int, int);
424#ifdef notdef
425static uint32_t bge_readreg_ind(struct bge_softc *, int);
426#endif
427static void bge_writemem_direct(struct bge_softc *, int, int);
428static void bge_writereg_ind(struct bge_softc *, int, int);
429
430static int bge_miibus_readreg(device_t, int, int);
431static int bge_miibus_writereg(device_t, int, int, int);
432static void bge_miibus_statchg(device_t);
433#ifdef DEVICE_POLLING
434static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
435#endif
436
437#define BGE_RESET_START 1
438#define BGE_RESET_STOP 2
439static void bge_sig_post_reset(struct bge_softc *, int);
440static void bge_sig_legacy(struct bge_softc *, int);
441static void bge_sig_pre_reset(struct bge_softc *, int);
442static void bge_stop_fw(struct bge_softc *);
443static int bge_reset(struct bge_softc *);
444static void bge_link_upd(struct bge_softc *);
445
446/*
447 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
448 * leak information to untrusted users. It is also known to cause alignment
449 * traps on certain architectures.
450 */
451#ifdef BGE_REGISTER_DEBUG
452static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
453static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
454static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
455#endif
456static void bge_add_sysctls(struct bge_softc *);
457static void bge_add_sysctl_stats_regs(struct bge_softc *,
458 struct sysctl_ctx_list *, struct sysctl_oid_list *);
459static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
460 struct sysctl_oid_list *);
461static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
462
463static device_method_t bge_methods[] = {
464 /* Device interface */
465 DEVMETHOD(device_probe, bge_probe),
466 DEVMETHOD(device_attach, bge_attach),
467 DEVMETHOD(device_detach, bge_detach),
468 DEVMETHOD(device_shutdown, bge_shutdown),
469 DEVMETHOD(device_suspend, bge_suspend),
470 DEVMETHOD(device_resume, bge_resume),
471
472 /* bus interface */
473 DEVMETHOD(bus_print_child, bus_generic_print_child),
474 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
475
476 /* MII interface */
477 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
478 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
479 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
480
481 { 0, 0 }
482};
483
484static driver_t bge_driver = {
485 "bge",
486 bge_methods,
487 sizeof(struct bge_softc)
488};
489
490static devclass_t bge_devclass;
491
492DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
493DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
494
495static int bge_allow_asf = 1;
496
497TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
498
499SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
500SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
501 "Allow ASF mode if available");
502
503#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
504#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
505#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
506#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
507#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
508
509static int
510bge_has_eaddr(struct bge_softc *sc)
511{
512#ifdef __sparc64__
513 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
514 device_t dev;
515 uint32_t subvendor;
516
517 dev = sc->bge_dev;
518
519 /*
520 * The on-board BGEs found in sun4u machines aren't fitted with
521 * an EEPROM which means that we have to obtain the MAC address
522 * via OFW and that some tests will always fail. We distinguish
523 * such BGEs by the subvendor ID, which also has to be obtained
524 * from OFW instead of the PCI configuration space as the latter
525 * indicates Broadcom as the subvendor of the netboot interface.
526 * For early Blade 1500 and 2500 we even have to check the OFW
527 * device path as the subvendor ID always defaults to Broadcom
528 * there.
529 */
530 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
531 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
532 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
533 return (0);
534 memset(buf, 0, sizeof(buf));
535 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
536 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
537 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
538 return (0);
539 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
540 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
541 return (0);
542 }
543#endif
544 return (1);
545}
546
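/*
 * Read a 32-bit word from the NIC's internal memory via the indirect
 * (PCI memory window) access registers.  The window base is restored
 * to zero afterwards.  On the BCM5906 the statistics block region is
 * not accessible this way, so reads from it simply return 0.
 */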
547static uint32_t
548bge_readmem_ind(struct bge_softc *sc, int off)
549{
550 device_t dev;
551 uint32_t val;
552
553 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
554 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
555 return (0);
556
557 dev = sc->bge_dev;
558
559 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
560 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
561 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
562 return (val);
563}
564
565static void
566bge_writemem_ind(struct bge_softc *sc, int off, int val)
567{
568 device_t dev;
569
570 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
571 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
572 return;
573
574 dev = sc->bge_dev;
575
576 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
577 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
578 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
579}
580
581#ifdef notdef
582static uint32_t
583bge_readreg_ind(struct bge_softc *sc, int off)
584{
585 device_t dev;
586
587 dev = sc->bge_dev;
588
589 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
590 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
591}
592#endif
593
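/*
 * Write a register using the indirect access method, i.e. through the
 * register base address and data windows in PCI configuration space.
 */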
594static void
595bge_writereg_ind(struct bge_softc *sc, int off, int val)
596{
597 device_t dev;
598
599 dev = sc->bge_dev;
600
601 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
602 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
603}
604
605static void
606bge_writemem_direct(struct bge_softc *sc, int off, int val)
607{
608 CSR_WRITE_4(sc, off, val);
609}
610
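/*
 * Write to a mailbox register.  On the BCM5906 the offset is remapped
 * into the low-priority (LPMBX) mailbox region.
 */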
611static void
612bge_writembx(struct bge_softc *sc, int off, int val)
613{
614 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
615 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
616
617 CSR_WRITE_4(sc, off, val);
618}
619
620/*
621 * Map a single buffer address.
622 */
623
624static void
625bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
626{
627 struct bge_dmamap_arg *ctx;
628
629 if (error)
630 return;
631
632 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
633
634 ctx = arg;
635 ctx->bge_busaddr = segs->ds_addr;
636}
637
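/*
 * Read a byte of data stored in NVRAM at address 'addr'.  The software
 * arbitration (SWARB) lock is held around the access and the auto
 * access interface is used to issue the read command.
 */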
638static uint8_t
639bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
640{
641 uint32_t access, byte = 0;
642 int i;
643
644 /* Lock. */
645 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
646 for (i = 0; i < 8000; i++) {
647 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
648 break;
649 DELAY(20);
650 }
651 if (i == 8000)
652 return (1);
653
654 /* Enable access. */
655 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
656 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
657
658 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
659 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
660 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
661 DELAY(10);
662 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
663 DELAY(10);
664 break;
665 }
666 }
667
668 if (i == BGE_TIMEOUT * 10) {
669 if_printf(sc->bge_ifp, "nvram read timed out\n");
670 return (1);
671 }
672
673 /* Get result. */
674 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
675
676 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
677
678 /* Disable access. */
679 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
680
681 /* Unlock. */
682 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
683 CSR_READ_4(sc, BGE_NVRAM_SWARB);
684
685 return (0);
686}
687
688/*
689 * Read a sequence of bytes from NVRAM.
690 */
691static int
692bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
693{
694 int err = 0, i;
695 uint8_t byte = 0;
696
697 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
698 return (1);
699
700 for (i = 0; i < cnt; i++) {
701 err = bge_nvram_getbyte(sc, off + i, &byte);
702 if (err)
703 break;
704 *(dest + i) = byte;
705 }
706
707 return (err ? 1 : 0);
708}
709
710/*
711 * Read a byte of data stored in the EEPROM at address 'addr.' The
712 * BCM570x supports both the traditional bitbang interface and an
713 * auto access interface for reading the EEPROM. We use the auto
714 * access method.
715 */
716static uint8_t
717bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
718{
719 int i;
720 uint32_t byte = 0;
721
722 /*
723 * Enable use of auto EEPROM access so we can avoid
724 * having to use the bitbang method.
725 */
726 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
727
728 /* Reset the EEPROM, load the clock period. */
729 CSR_WRITE_4(sc, BGE_EE_ADDR,
730 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
731 DELAY(20);
732
733 /* Issue the read EEPROM command. */
734 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
735
736 /* Wait for completion */
 737 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
738 DELAY(10);
739 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
740 break;
741 }
742
743 if (i == BGE_TIMEOUT * 10) {
744 device_printf(sc->bge_dev, "EEPROM read timed out\n");
745 return (1);
746 }
747
748 /* Get result. */
749 byte = CSR_READ_4(sc, BGE_EE_DATA);
750
751 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
752
753 return (0);
754}
755
756/*
757 * Read a sequence of bytes from the EEPROM.
758 */
759static int
760bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
761{
762 int i, error = 0;
763 uint8_t byte = 0;
764
765 for (i = 0; i < cnt; i++) {
766 error = bge_eeprom_getbyte(sc, off + i, &byte);
767 if (error)
768 break;
769 *(dest + i) = byte;
770 }
771
772 return (error ? 1 : 0);
773}
774
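/*
 * Read a PHY register through the MI communication port.  Autopolling
 * is temporarily disabled while the access is in progress to avoid
 * triggering PCI errors, and is restored afterwards.
 */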
775static int
776bge_miibus_readreg(device_t dev, int phy, int reg)
777{
778 struct bge_softc *sc;
779 uint32_t val;
780 int i;
781
782 sc = device_get_softc(dev);
783
784 /* Prevent the probe from finding incorrect devices. */
785 if (phy != sc->bge_phy_addr)
786 return (0);
787
788 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
789 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
790 CSR_WRITE_4(sc, BGE_MI_MODE,
791 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
792 DELAY(80);
793 }
794
795 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
796 BGE_MIPHY(phy) | BGE_MIREG(reg));
797
798 /* Poll for the PHY register access to complete. */
799 for (i = 0; i < BGE_TIMEOUT; i++) {
800 DELAY(10);
801 val = CSR_READ_4(sc, BGE_MI_COMM);
802 if ((val & BGE_MICOMM_BUSY) == 0) {
803 DELAY(5);
804 val = CSR_READ_4(sc, BGE_MI_COMM);
805 break;
806 }
807 }
808
809 if (i == BGE_TIMEOUT) {
810 device_printf(sc->bge_dev,
811 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
812 phy, reg, val);
813 val = 0;
814 }
815
816 /* Restore the autopoll bit if necessary. */
817 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
818 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
819 DELAY(80);
820 }
821
822 if (val & BGE_MICOMM_READFAIL)
823 return (0);
824
825 return (val & 0xFFFF);
826}
827
828static int
829bge_miibus_writereg(device_t dev, int phy, int reg, int val)
830{
831 struct bge_softc *sc;
832 int i;
833
834 sc = device_get_softc(dev);
835
836 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
837 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
838 return (0);
839
840 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
841 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
842 CSR_WRITE_4(sc, BGE_MI_MODE,
843 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
844 DELAY(80);
845 }
846
847 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
848 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
849
850 for (i = 0; i < BGE_TIMEOUT; i++) {
851 DELAY(10);
852 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
853 DELAY(5);
854 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
855 break;
856 }
857 }
858
859 /* Restore the autopoll bit if necessary. */
860 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
861 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
862 DELAY(80);
863 }
864
865 if (i == BGE_TIMEOUT)
866 device_printf(sc->bge_dev,
867 "PHY write timed out (phy %d, reg %d, val %d)\n",
868 phy, reg, val);
869
870 return (0);
871}
872
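/*
 * Handle link state changes reported by the PHY: record whether the
 * link is up and program the MAC port mode, duplex and flow control
 * settings to match the negotiated media.
 */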
873static void
874bge_miibus_statchg(device_t dev)
875{
876 struct bge_softc *sc;
877 struct mii_data *mii;
878 sc = device_get_softc(dev);
879 mii = device_get_softc(sc->bge_miibus);
880
881 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
882 (IFM_ACTIVE | IFM_AVALID)) {
883 switch (IFM_SUBTYPE(mii->mii_media_active)) {
884 case IFM_10_T:
885 case IFM_100_TX:
886 sc->bge_link = 1;
887 break;
888 case IFM_1000_T:
889 case IFM_1000_SX:
890 case IFM_2500_SX:
891 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
892 sc->bge_link = 1;
893 else
894 sc->bge_link = 0;
895 break;
896 default:
897 sc->bge_link = 0;
898 break;
899 }
900 } else
901 sc->bge_link = 0;
902 if (sc->bge_link == 0)
903 return;
904 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
905 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
906 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
907 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
908 else
909 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
910
 911 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
912 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
913 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FLAG1)
914 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
915 else
916 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
917 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FLAG0)
918 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
919 else
920 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
921 } else {
922 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
923 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
924 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
925 }
926}
927
928/*
 929 * Initialize a standard receive ring descriptor.
930 */
931static int
932bge_newbuf_std(struct bge_softc *sc, int i)
933{
934 struct mbuf *m;
935 struct bge_rx_bd *r;
936 bus_dma_segment_t segs[1];
937 bus_dmamap_t map;
938 int error, nsegs;
939
940 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
941 if (m == NULL)
942 return (ENOBUFS);
943 m->m_len = m->m_pkthdr.len = MCLBYTES;
944 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
945 m_adj(m, ETHER_ALIGN);
946
947 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
948 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
949 if (error != 0) {
950 m_freem(m);
951 return (error);
952 }
953 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
954 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
955 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
956 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
957 sc->bge_cdata.bge_rx_std_dmamap[i]);
958 }
959 map = sc->bge_cdata.bge_rx_std_dmamap[i];
960 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
961 sc->bge_cdata.bge_rx_std_sparemap = map;
962 sc->bge_cdata.bge_rx_std_chain[i] = m;
963 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
964 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
965 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
966 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
967 r->bge_flags = BGE_RXBDFLAG_END;
968 r->bge_len = segs[0].ds_len;
969 r->bge_idx = i;
970
971 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
972 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
973
974 return (0);
975}
976
977/*
978 * Initialize a jumbo receive ring descriptor. This allocates
979 * a jumbo buffer from the pool managed internally by the driver.
980 */
981static int
982bge_newbuf_jumbo(struct bge_softc *sc, int i)
983{
984 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
985 bus_dmamap_t map;
986 struct bge_extrx_bd *r;
987 struct mbuf *m;
988 int error, nsegs;
989
990 MGETHDR(m, M_DONTWAIT, MT_DATA);
991 if (m == NULL)
992 return (ENOBUFS);
993
994 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
995 if (!(m->m_flags & M_EXT)) {
996 m_freem(m);
997 return (ENOBUFS);
998 }
999 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1000 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1001 m_adj(m, ETHER_ALIGN);
1002
1003 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1004 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1005 if (error != 0) {
1006 m_freem(m);
1007 return (error);
1008 }
1009
1010 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1011 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1012 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1013 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1014 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1015 }
1016 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1017 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1018 sc->bge_cdata.bge_rx_jumbo_sparemap;
1019 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1020 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1021 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1022 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1023 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1024 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1025
1026 /*
1027 * Fill in the extended RX buffer descriptor.
1028 */
1029 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1030 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1031 r->bge_idx = i;
1032 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1033 switch (nsegs) {
1034 case 4:
1035 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1036 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1037 r->bge_len3 = segs[3].ds_len;
1038 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1039 case 3:
1040 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1041 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1042 r->bge_len2 = segs[2].ds_len;
1043 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1044 case 2:
1045 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1046 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1047 r->bge_len1 = segs[1].ds_len;
1048 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1049 case 1:
1050 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1051 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1052 r->bge_len0 = segs[0].ds_len;
1053 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1054 break;
1055 default:
1056 panic("%s: %d segments\n", __func__, nsegs);
1057 }
1058
1059 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1060 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1061
1062 return (0);
1063}
1064
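/*
 * Populate the standard receive producer ring with mbuf clusters and
 * hand the initial producer index to the chip.
 */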
1065static int
1066bge_init_rx_ring_std(struct bge_softc *sc)
1067{
1068 int error, i;
1069
1070 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1071 sc->bge_std = 0;
1072 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1073 if ((error = bge_newbuf_std(sc, i)) != 0)
1074 return (error);
1075 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1076 }
1077
1078 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1079 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1080
1081 sc->bge_std = 0;
1082 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1083
1084 return (0);
1085}
1086
1087static void
1088bge_free_rx_ring_std(struct bge_softc *sc)
1089{
1090 int i;
1091
1092 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1093 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1094 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1095 sc->bge_cdata.bge_rx_std_dmamap[i],
1096 BUS_DMASYNC_POSTREAD);
1097 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1098 sc->bge_cdata.bge_rx_std_dmamap[i]);
1099 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1100 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1101 }
1102 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1103 sizeof(struct bge_rx_bd));
1104 }
1105}
1106
1107static int
1108bge_init_rx_ring_jumbo(struct bge_softc *sc)
1109{
1110 struct bge_rcb *rcb;
1111 int error, i;
1112
1113 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1114 sc->bge_jumbo = 0;
1115 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1116 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1117 return (error);
1118 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1119 }
1120
1121 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1122 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1123
1124 sc->bge_jumbo = 0;
1125
1126 /* Enable the jumbo receive producer ring. */
1127 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1128 rcb->bge_maxlen_flags =
1129 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1130 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1131
1132 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1133
1134 return (0);
1135}
1136
1137static void
1138bge_free_rx_ring_jumbo(struct bge_softc *sc)
1139{
1140 int i;
1141
1142 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1143 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1144 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1145 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1146 BUS_DMASYNC_POSTREAD);
1147 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1148 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1149 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1150 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1151 }
1152 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1153 sizeof(struct bge_extrx_bd));
1154 }
1155}
1156
1157static void
1158bge_free_tx_ring(struct bge_softc *sc)
1159{
1160 int i;
1161
1162 if (sc->bge_ldata.bge_tx_ring == NULL)
1163 return;
1164
1165 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1166 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1167 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1168 sc->bge_cdata.bge_tx_dmamap[i],
1169 BUS_DMASYNC_POSTWRITE);
1170 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1171 sc->bge_cdata.bge_tx_dmamap[i]);
1172 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1173 sc->bge_cdata.bge_tx_chain[i] = NULL;
1174 }
1175 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1176 sizeof(struct bge_tx_bd));
1177 }
1178}
1179
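/*
 * Initialize the transmit ring bookkeeping and reset the host and
 * NIC-memory send ring producer indices.
 */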
1180static int
1181bge_init_tx_ring(struct bge_softc *sc)
1182{
1183 sc->bge_txcnt = 0;
1184 sc->bge_tx_saved_considx = 0;
1185
1186 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1187 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1188 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1189
1190 /* Initialize transmit producer index for host-memory send ring. */
1191 sc->bge_tx_prodidx = 0;
1192 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1193
1194 /* 5700 b2 errata */
1195 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1196 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1197
1198 /* NIC-memory send ring not used; initialize to zero. */
1199 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1200 /* 5700 b2 errata */
1201 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1202 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1203
1204 return (0);
1205}
1206
1207static void
1208bge_setpromisc(struct bge_softc *sc)
1209{
1210 struct ifnet *ifp;
1211
1212 BGE_LOCK_ASSERT(sc);
1213
1214 ifp = sc->bge_ifp;
1215
1216 /* Enable or disable promiscuous mode as needed. */
1217 if (ifp->if_flags & IFF_PROMISC)
1218 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1219 else
1220 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1221}
1222
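/*
 * Program the multicast hash filter.  Each multicast address is hashed
 * with ether_crc32_le() down to 7 bits, which select one of the 128
 * filter bits spread across the four BGE_MAR registers.  In
 * promiscuous or allmulti mode all filter bits are simply set.
 */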
1223static void
1224bge_setmulti(struct bge_softc *sc)
1225{
1226 struct ifnet *ifp;
1227 struct ifmultiaddr *ifma;
1228 uint32_t hashes[4] = { 0, 0, 0, 0 };
1229 int h, i;
1230
1231 BGE_LOCK_ASSERT(sc);
1232
1233 ifp = sc->bge_ifp;
1234
1235 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1236 for (i = 0; i < 4; i++)
1237 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1238 return;
1239 }
1240
1241 /* First, zot all the existing filters. */
1242 for (i = 0; i < 4; i++)
1243 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1244
1245 /* Now program new ones. */
1246 if_maddr_rlock(ifp);
1247 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1248 if (ifma->ifma_addr->sa_family != AF_LINK)
1249 continue;
1250 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1251 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1252 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1253 }
1254 if_maddr_runlock(ifp);
1255
1256 for (i = 0; i < 4; i++)
1257 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1258}
1259
1260static void
1261bge_setvlan(struct bge_softc *sc)
1262{
1263 struct ifnet *ifp;
1264
1265 BGE_LOCK_ASSERT(sc);
1266
1267 ifp = sc->bge_ifp;
1268
1269 /* Enable or disable VLAN tag stripping as needed. */
1270 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1271 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1272 else
1273 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1274}
1275
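/*
 * Signal the ASF/IPMI firmware before a reset: write the driver magic
 * number to the general communication area and, when the new handshake
 * is in use, post a start/unload event to the SDI status mailbox.
 */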
1276static void
1277bge_sig_pre_reset(struct bge_softc *sc, int type)
1278{
1279
1280 /*
1281 * Some chips don't like this, so only do it if ASF is enabled.
1282 */
1283 if (sc->bge_asf_mode)
1284 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1285
1286 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1287 switch (type) {
1288 case BGE_RESET_START:
1289 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1290 break;
1291 case BGE_RESET_STOP:
1292 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1293 break;
1294 }
1295 }
1296}
1297
1298static void
1299bge_sig_post_reset(struct bge_softc *sc, int type)
1300{
1301
1302 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1303 switch (type) {
1304 case BGE_RESET_START:
1305 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1306 /* START DONE */
1307 break;
1308 case BGE_RESET_STOP:
1309 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1310 break;
1311 }
1312 }
1313}
1314
1315static void
1316bge_sig_legacy(struct bge_softc *sc, int type)
1317{
1318
1319 if (sc->bge_asf_mode) {
1320 switch (type) {
1321 case BGE_RESET_START:
1322 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1323 break;
1324 case BGE_RESET_STOP:
1325 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1326 break;
1327 }
1328 }
1329}
1330
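/*
 * Ask the ASF firmware to pause and briefly poll for it to acknowledge
 * by clearing the CPU event bit.
 */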
1331static void
1332bge_stop_fw(struct bge_softc *sc)
1333{
1334 int i;
1335
1336 if (sc->bge_asf_mode) {
1337 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1338 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1339 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1340
1341 for (i = 0; i < 100; i++ ) {
1342 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1343 break;
1344 DELAY(10);
1345 }
1346 }
1347}
1348
1349/*
1350 * Do endian, PCI and DMA initialization.
1351 */
1352static int
1353bge_chipinit(struct bge_softc *sc)
1354{
1355 uint32_t dma_rw_ctl;
1356 uint16_t val;
1357 int i;
1358
1359 /* Set endianness before we access any non-PCI registers. */
1360 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1361
1362 /* Clear the MAC control register */
1363 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1364
1365 /*
1366 * Clear the MAC statistics block in the NIC's
1367 * internal memory.
1368 */
1369 for (i = BGE_STATS_BLOCK;
1370 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1371 BGE_MEMWIN_WRITE(sc, i, 0);
1372
1373 for (i = BGE_STATUS_BLOCK;
1374 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1375 BGE_MEMWIN_WRITE(sc, i, 0);
1376
1377 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1378 /*
1379 * Fix data corruption caused by non-qword write with WB.
1380 * Fix master abort in PCI mode.
1381 * Fix PCI latency timer.
1382 */
1383 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1384 val |= (1 << 10) | (1 << 12) | (1 << 13);
1385 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1386 }
1387
1388 /*
1389 * Set up the PCI DMA control register.
1390 */
1391 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1392 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1393 if (sc->bge_flags & BGE_FLAG_PCIE) {
1394 /* Read watermark not used, 128 bytes for write. */
1395 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1396 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1397 if (BGE_IS_5714_FAMILY(sc)) {
1398 /* 256 bytes for read and write. */
1399 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1400 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1401 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1402 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1403 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1404 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1405 /*
1406 * In the BCM5703, the DMA read watermark should
1407 * be set to less than or equal to the maximum
1408 * memory read byte count of the PCI-X command
1409 * register.
1410 */
1411 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1412 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1413 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1414 /* 1536 bytes for read, 384 bytes for write. */
1415 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1416 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1417 } else {
1418 /* 384 bytes for read and write. */
1419 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1420 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1421 0x0F;
1422 }
1423 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1424 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1425 uint32_t tmp;
1426
1427 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1428 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1429 if (tmp == 6 || tmp == 7)
1430 dma_rw_ctl |=
1431 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1432
1433 /* Set PCI-X DMA write workaround. */
1434 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1435 }
1436 } else {
1437 /* Conventional PCI bus: 256 bytes for read and write. */
1438 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1439 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1440
1441 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1442 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1443 dma_rw_ctl |= 0x0F;
1444 }
1445 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1446 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1447 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1448 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1449 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1450 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1451 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1452 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1453
1454 /*
1455 * Set up general mode register.
1456 */
1457 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1458 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1459 BGE_MODECTL_TX_NO_PHDR_CSUM);
1460
1461 /*
1462 * The BCM5701 B5 has a bug causing data corruption when using
1463 * 64-bit DMA reads, which can be terminated early and then
1464 * completed later as 32-bit accesses, in combination with
1465 * certain bridges.
1466 */
1467 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1468 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1469 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1470
1471 /*
1472 * Tell the firmware the driver is running
1473 */
1474 if (sc->bge_asf_mode & ASF_STACKUP)
1475 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1476
1477 /*
1478 * Disable memory write invalidate. Apparently it is not supported
1479 * properly by these devices. Also ensure that INTx isn't disabled,
1480 * as these chips need it even when using MSI.
1481 */
1482 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1483 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1484
1485 /* Set the timer prescaler (always 66MHz) */
1486 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1487
1488 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1489 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1490 DELAY(40); /* XXX */
1491
1492 /* Put PHY into ready state */
1493 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1494 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1495 DELAY(40);
1496 }
1497
1498 return (0);
1499}
1500
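/*
 * Initialize the controller blocks: the buffer manager, the
 * flow-through queues and the various ring control blocks located in
 * NIC memory.
 */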
1501static int
1502bge_blockinit(struct bge_softc *sc)
1503{
1504 struct bge_rcb *rcb;
1505 bus_size_t vrcb;
1506 bge_hostaddr taddr;
1507 uint32_t val;
1508 int i, limit;
1509
1510 /*
1511 * Initialize the memory window pointer register so that
1512 * we can access the first 32K of internal NIC RAM. This will
1513 * allow us to set up the TX send ring RCBs and the RX return
1514 * ring RCBs, plus other things which live in NIC memory.
1515 */
1516 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1517
1518 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1519
1520 if (!(BGE_IS_5705_PLUS(sc))) {
1521 /* Configure mbuf memory pool */
1522 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1523 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1524 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1525 else
1526 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1527
1528 /* Configure DMA resource pool */
1529 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1530 BGE_DMA_DESCRIPTORS);
1531 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1532 }
1533
1534 /* Configure mbuf pool watermarks */
1535 if (!BGE_IS_5705_PLUS(sc)) {
1536 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1537 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1538 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1539 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1540 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1541 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1542 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1543 } else {
1544 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1545 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1546 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1547 }
1548
1549 /* Configure DMA resource watermarks */
1550 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1551 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1552
1553 /* Enable buffer manager */
1554 if (!(BGE_IS_5705_PLUS(sc))) {
1555 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1556 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1557
1558 /* Poll for buffer manager start indication */
1559 for (i = 0; i < BGE_TIMEOUT; i++) {
1560 DELAY(10);
1561 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1562 break;
1563 }
1564
1565 if (i == BGE_TIMEOUT) {
1566 device_printf(sc->bge_dev,
1567 "buffer manager failed to start\n");
1568 return (ENXIO);
1569 }
1570 }
1571
1572 /* Enable flow-through queues */
1573 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1574 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1575
1576 /* Wait until queue initialization is complete */
1577 for (i = 0; i < BGE_TIMEOUT; i++) {
1578 DELAY(10);
1579 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1580 break;
1581 }
1582
1583 if (i == BGE_TIMEOUT) {
1584 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1585 return (ENXIO);
1586 }
1587
1588 /*
1589 * Summary of rings supported by the controller:
1590 *
1591 * Standard Receive Producer Ring
1592 * - This ring is used to feed receive buffers for "standard"
1593 * sized frames (typically 1536 bytes) to the controller.
1594 *
1595 * Jumbo Receive Producer Ring
1596 * - This ring is used to feed receive buffers for jumbo sized
1597 * frames (i.e. anything bigger than the "standard" frames)
1598 * to the controller.
1599 *
1600 * Mini Receive Producer Ring
1601 * - This ring is used to feed receive buffers for "mini"
1602 * sized frames to the controller.
1603 * - This feature required external memory for the controller
1604 * but was never used in a production system. Should always
1605 * be disabled.
1606 *
1607 * Receive Return Ring
1608 * - After the controller has placed an incoming frame into a
1609 * receive buffer that buffer is moved into a receive return
1610 * ring. The driver is then responsible for passing the
1611 * buffer up to the stack. Many versions of the controller
1612 * support multiple RR rings.
1613 *
1614 * Send Ring
1615 * - This ring is used for outgoing frames. Many versions of
1616 * the controller support multiple send rings.
1617 */
1618
1619 /* Initialize the standard receive producer ring control block. */
1620 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1621 rcb->bge_hostaddr.bge_addr_lo =
1622 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1623 rcb->bge_hostaddr.bge_addr_hi =
1624 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1625 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1626 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1627 if (BGE_IS_5705_PLUS(sc)) {
1628 /*
1629 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1630 * Bits 15-2 : Reserved (should be 0)
1631 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1632 * Bit 0 : Reserved
1633 */
1634 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1635 } else {
1636 /*
1637 * Ring size is always XXX entries
1638 * Bits 31-16: Maximum RX frame size
1639 * Bits 15-2 : Reserved (should be 0)
1640 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1641 * Bit 0 : Reserved
1642 */
1643 rcb->bge_maxlen_flags =
1644 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1645 }
1646 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1647 /* Write the standard receive producer ring control block. */
1648 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1649 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1650 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1651 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1652
1653 /* Reset the standard receive producer ring producer index. */
1654 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1655
1656 /*
1657 * Initialize the jumbo RX producer ring control
1658 * block. We set the 'ring disabled' bit in the
1659 * flags field until we're actually ready to start
1660 * using this ring (i.e. once we set the MTU
1661 * high enough to require it).
1662 */
1663 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1664 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1665 /* Get the jumbo receive producer ring RCB parameters. */
1666 rcb->bge_hostaddr.bge_addr_lo =
1667 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1668 rcb->bge_hostaddr.bge_addr_hi =
1669 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1670 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1671 sc->bge_cdata.bge_rx_jumbo_ring_map,
1672 BUS_DMASYNC_PREREAD);
1673 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1674 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1675 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1676 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1677 rcb->bge_hostaddr.bge_addr_hi);
1678 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1679 rcb->bge_hostaddr.bge_addr_lo);
1680 /* Program the jumbo receive producer ring RCB parameters. */
1681 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1682 rcb->bge_maxlen_flags);
1683 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1684 /* Reset the jumbo receive producer ring producer index. */
1685 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1686 }
1687
1688 /* Disable the mini receive producer ring RCB. */
1689 if (BGE_IS_5700_FAMILY(sc)) {
1690 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1691 rcb->bge_maxlen_flags =
1692 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1693 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1694 rcb->bge_maxlen_flags);
1695 /* Reset the mini receive producer ring producer index. */
1696 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1697 }
1698
1699 /*
1700 * The BD ring replenish thresholds control how often the
1701 * hardware fetches new BD's from the producer rings in host
1702 * memory. Setting the value too low on a busy system can
1703 * starve the hardware and reduce the throughput.
1704 *
1705 * Set the BD ring replenish thresholds. The recommended
1706 * values are 1/8th the number of descriptors allocated to
1707 * each ring.
1708 * XXX The 5754 requires a lower threshold, so it might be a
1709 * requirement of all 575x family chips. The Linux driver sets
1710 * the lower threshold for all 5705 family chips as well, but there
1711 * are reports that it might not need to be so strict.
1712 *
1713 * XXX Linux does some extra fiddling here for the 5906 parts as
1714 * well.
1715 */
1716 if (BGE_IS_5705_PLUS(sc))
1717 val = 8;
1718 else
1719 val = BGE_STD_RX_RING_CNT / 8;
1720 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1721 if (BGE_IS_JUMBO_CAPABLE(sc))
1722 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1723 BGE_JUMBO_RX_RING_CNT/8);
1724
1725 /*
1726 * Disable all send rings by setting the 'ring disabled' bit
1727 * in the flags field of all the TX send ring control blocks,
1728 * located in NIC memory.
1729 */
1730 if (!BGE_IS_5705_PLUS(sc))
1731 /* 5700 to 5704 had 16 send rings. */
1732 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1733 else
1734 limit = 1;
1735 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1736 for (i = 0; i < limit; i++) {
1737 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1738 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1739 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1740 vrcb += sizeof(struct bge_rcb);
1741 }
1742
1743 /* Configure send ring RCB 0 (we use only the first ring) */
1744 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1745 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1746 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1747 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1748 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1749 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1750 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1751 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1752
1753 /*
1754 * Disable all receive return rings by setting the
1755 * 'ring disabled' bit in the flags field of all the receive
1756 * return ring control blocks, located in NIC memory.
1757 */
1758 if (!BGE_IS_5705_PLUS(sc))
1759 limit = BGE_RX_RINGS_MAX;
1760 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
1761 limit = 4;
1762 else
1763 limit = 1;
1764 /* Disable all receive return rings. */
1765 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1766 for (i = 0; i < limit; i++) {
1767 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1768 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1769 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1770 BGE_RCB_FLAG_RING_DISABLED);
1771 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1772 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1773 (i * (sizeof(uint64_t))), 0);
1774 vrcb += sizeof(struct bge_rcb);
1775 }
1776
1777 /*
1778 * Set up receive return ring 0. Note that the NIC address
1779 * for RX return rings is 0x0. The return rings live entirely
1780 * within the host, so the nicaddr field in the RCB isn't used.
1781 */
1782 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1783 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1784 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1785 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1786 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1787 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1788 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1789
1790 /* Set random backoff seed for TX */
1791 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1792 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1793 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1794 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1795 BGE_TX_BACKOFF_SEED_MASK);
1796
1797 /* Set inter-packet gap */
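/*
 * The value 0x2620 appears to encode the default slot time (0x20)
 * and inter-packet gap settings; the Linux tg3 driver programs the
 * same value here.
 */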
1798 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1799
1800 /*
1801 * Specify which ring to use for packets that don't match
1802 * any RX rules.
1803 */
1804 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1805
1806 /*
1807 * Configure number of RX lists. One interrupt distribution
1808 * list, sixteen active lists, one bad frames class.
1809 */
1810 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1811
1812 /* Initialize RX list placement stats mask. */
1813 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1814 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1815
1816 /* Disable host coalescing until we get it set up */
1817 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1818
1819 /* Poll to make sure it's shut down. */
1820 for (i = 0; i < BGE_TIMEOUT; i++) {
1821 DELAY(10);
1822 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1823 break;
1824 }
1825
1826 if (i == BGE_TIMEOUT) {
1827 device_printf(sc->bge_dev,
1828 "host coalescing engine failed to idle\n");
1829 return (ENXIO);
1830 }
1831
1832 /* Set up host coalescing defaults */
1833 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1834 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1835 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1836 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1837 if (!(BGE_IS_5705_PLUS(sc))) {
1838 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1839 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1840 }
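/*
 * Coalesce at most one RX/TX buffer descriptor per status block
 * update while an interrupt is pending.
 */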
1841 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1842 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1843
1844 /* Set up address of statistics block */
1845 if (!(BGE_IS_5705_PLUS(sc))) {
1846 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1847 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1848 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1849 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1850 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1851 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1852 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1853 }
1854
1855 /* Set up address of status block */
1856 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1857 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1858 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1859 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1860
1861 /* Set up status block size. */
1862 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1863 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1864 val = BGE_STATBLKSZ_FULL;
1865 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1866 } else {
1867 val = BGE_STATBLKSZ_32BYTE;
1868 bzero(sc->bge_ldata.bge_status_block, 32);
1869 }
1870 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1871 sc->bge_cdata.bge_status_map,
1872 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1873
1874 /* Turn on host coalescing state machine */
1875 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1876
1877 /* Turn on RX BD completion state machine and enable attentions */
1878 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1879 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1880
1881 /* Turn on RX list placement state machine */
1882 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1883
1884 /* Turn on RX list selector state machine. */
1885 if (!(BGE_IS_5705_PLUS(sc)))
1886 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1887
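/*
 * Assemble the MAC mode register value: enable the TX and RX DMA
 * engines, enable and clear the TX/RX statistics, and enable frame
 * header DMA. The port mode is selected below from the PHY type.
 */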
1888 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1889 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1890 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1891 BGE_MACMODE_FRMHDR_DMA_ENB;
1892
1893 if (sc->bge_flags & BGE_FLAG_TBI)
1894 val |= BGE_PORTMODE_TBI;
1895 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1896 val |= BGE_PORTMODE_GMII;
1897 else
1898 val |= BGE_PORTMODE_MII;
1899
1900 /* Turn on DMA, clear stats */
1901 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1902
1903 /* Set misc. local control, enable interrupts on attentions */
1904 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1905
1906#ifdef notdef
1907 /* Assert GPIO pins for PHY reset */
1908 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1909 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1910 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1911 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1912#endif
1913
1914 /* Turn on DMA completion state machine */
1915 if (!(BGE_IS_5705_PLUS(sc)))
1916 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1917
1918 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1919
1920 /* Enable host coalescing bug fix. */
1921 if (BGE_IS_5755_PLUS(sc))
1922 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1923
1924 /* Request larger DMA burst size to get better performance. */
1925 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
1926 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1927
1928 /* Turn on write DMA state machine */
1929 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1930 DELAY(40);
1931
1932 /* Turn on read DMA state machine */
1933 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1934 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1935 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1936 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1937 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1938 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1939 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1940 if (sc->bge_flags & BGE_FLAG_PCIE)
1941 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1942 if (sc->bge_flags & BGE_FLAG_TSO) {
1943 val |= BGE_RDMAMODE_TSO4_ENABLE;
1944 if (sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1945 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1946 val |= BGE_RDMAMODE_TSO6_ENABLE;
1947 }
1948 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
1949 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1950 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1951 sc->bge_asicrev == BGE_ASICREV_BCM57780) {
1952 /*
1953 * Enable fix for read DMA FIFO overruns.
1954 * The fix is to limit the number of RX BDs
1955 * the hardware fetches at a time.
1956 */
1957 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
1958 CSR_READ_4(sc, BGE_RDMA_RSRVCTRL) |
1959 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1960 }
1961 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1962 DELAY(40);
1963
1964 /* Turn on RX data completion state machine */
1965 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1966
1967 /* Turn on RX BD initiator state machine */
1968 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1969
1970 /* Turn on RX data and RX BD initiator state machine */
1971 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1972
1973 /* Turn on Mbuf cluster free state machine */
1974 if (!(BGE_IS_5705_PLUS(sc)))
1975 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1976
1977 /* Turn on send BD completion state machine */
1978 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1979
1980 /* Turn on send data completion state machine */
1981 val = BGE_SDCMODE_ENABLE;
1982 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
1983 val |= BGE_SDCMODE_CDELAY;
1984 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1985
1986 /* Turn on send data initiator state machine */
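/*
 * The extra 0x08 is an undocumented bit that is set only when
 * hardware TSO is in use.
 */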
1987 if (sc->bge_flags & BGE_FLAG_TSO)
1988 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
1989 else
1990 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1991
1992 /* Turn on send BD initiator state machine */
1993 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1994
1995 /* Turn on send BD selector state machine */
1996 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1997
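/* Enable send data initiator statistics, using the faster update mode. */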
1998 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1999 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2000 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2001
2002 /* ack/clear link change events */
2003 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2004 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2005 BGE_MACSTAT_LINK_CHANGED);
2006 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2007
2008 /*
2009 * Enable attention when the link has changed state for
2010 * devices that use auto polling.
2011 */
2012 if (sc->bge_flags & BGE_FLAG_TBI) {
2013 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2014 } else {
2015 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2016 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2017 DELAY(80);
2018 }
2019 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2020 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2021 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2022 BGE_EVTENB_MI_INTERRUPT);
2023 }
2024
2025 /*
2026 * Clear any pending link state attention.
2027 * Otherwise some link state change events may be lost until attention
2028 * is cleared by bge_intr() -> bge_link_upd() sequence.
2029 * It's not necessary on newer BCM chips - perhaps enabling link
2030 * state change attentions implies clearing pending attention.
2031 */
2032 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2033 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2034 BGE_MACSTAT_LINK_CHANGED);
2035
2036 /* Enable link state change attentions. */
2037 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2038
2039 return (0);
2040}
2041
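/*
 * Look up a chip revision: first try to match the full chip ID, then
 * fall back to matching only the major ASIC revision.
 */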
2042const struct bge_revision *
2043bge_lookup_rev(uint32_t chipid)
2044{
2045 const struct bge_revision *br;
2046
2047 for (br = bge_revisions; br->br_name != NULL; br++) {
2048 if (br->br_chipid == chipid)
2049 return (br);
2050 }
2051
2052 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2053 if (br->br_chipid == BGE_ASICREV(chipid))
2054 return (br);
2055 }
2056
2057 return (NULL);
2058}
2059
2060const struct bge_vendor *
2061bge_lookup_vendor(uint16_t vid)
2062{
2063 const struct bge_vendor *v;
2064
2065 for (v = bge_vendors; v->v_name != NULL; v++)
2066 if (v->v_id == vid)
2067 return (v);
2068
2069 panic("%s: unknown vendor %d", __func__, vid);
2070 return (NULL);
2071}
2072
2073/*
2074 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2075 * against our list and return its name if we find a match.
2076 *
2077 * Note that since the Broadcom controller contains VPD support, we
2078 * try to get the device name string from the controller itself instead
2079 * of the compiled-in string. It guarantees we'll always announce the
2080 * right product name. We fall back to the compiled-in string when
2081 * VPD is unavailable or corrupt.
2082 */
2083static int
2084bge_probe(device_t dev)
2085{
2086 const struct bge_type *t = bge_devs;
2087 struct bge_softc *sc = device_get_softc(dev);
2088 uint16_t vid, did;
2089
2090 sc->bge_dev = dev;
2091 vid = pci_get_vendor(dev);
2092 did = pci_get_device(dev);
2093 while(t->bge_vid != 0) {
2094 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2095 char model[64], buf[96];
2096 const struct bge_revision *br;
2097 const struct bge_vendor *v;
2098 uint32_t id;
2099
2100 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2101 BGE_PCIMISCCTL_ASICREV_SHIFT;
2102 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG)
2103 id = pci_read_config(dev,
2104 BGE_PCI_PRODID_ASICREV, 4);
2105 br = bge_lookup_rev(id);
2106 v = bge_lookup_vendor(vid);
2107 {
2108#if __FreeBSD_version > 700024
2109 const char *pname;
2110
2111 if (bge_has_eaddr(sc) &&
2112 pci_get_vpd_ident(dev, &pname) == 0)
2113 snprintf(model, 64, "%s", pname);
2114 else
2115#endif
2116 snprintf(model, 64, "%s %s",
2117 v->v_name,
2118 br != NULL ? br->br_name :
2119 "NetXtreme Ethernet Controller");
2120 }
2121 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2122 br != NULL ? "" : "unknown ", id);
2123 device_set_desc_copy(dev, buf);
2124 return (0);
2125 }
2126 t++;
2127 }
2128
2129 return (ENXIO);
2130}
2131
2132static void
2133bge_dma_free(struct bge_softc *sc)
2134{
2135 int i;
2136
2137 /* Destroy DMA maps for RX buffers. */
2138 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2139 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2140 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2141 sc->bge_cdata.bge_rx_std_dmamap[i]);
2142 }
2143 if (sc->bge_cdata.bge_rx_std_sparemap)
2144 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2145 sc->bge_cdata.bge_rx_std_sparemap);
2146
2147 /* Destroy DMA maps for jumbo RX buffers. */
2148 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2149 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2150 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2151 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2152 }
2153 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2154 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2155 sc->bge_cdata.bge_rx_jumbo_sparemap);
2156
2157 /* Destroy DMA maps for TX buffers. */
2158 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2159 if (sc->bge_cdata.bge_tx_dmamap[i])
2160 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2161 sc->bge_cdata.bge_tx_dmamap[i]);
2162 }
2163
2164 if (sc->bge_cdata.bge_rx_mtag)
2165 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2166 if (sc->bge_cdata.bge_tx_mtag)
2167 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2168
2169
2170 /* Destroy standard RX ring. */
2171 if (sc->bge_cdata.bge_rx_std_ring_map)
2172 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2173 sc->bge_cdata.bge_rx_std_ring_map);
2174 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2175 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2176 sc->bge_ldata.bge_rx_std_ring,
2177 sc->bge_cdata.bge_rx_std_ring_map);
2178
2179 if (sc->bge_cdata.bge_rx_std_ring_tag)
2180 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2181
2182 /* Destroy jumbo RX ring. */
2183 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2184 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2185 sc->bge_cdata.bge_rx_jumbo_ring_map);
2186
2187 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2188 sc->bge_ldata.bge_rx_jumbo_ring)
2189 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2190 sc->bge_ldata.bge_rx_jumbo_ring,
2191 sc->bge_cdata.bge_rx_jumbo_ring_map);
2192
2193 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2194 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2195
2196 /* Destroy RX return ring. */
2197 if (sc->bge_cdata.bge_rx_return_ring_map)
2198 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2199 sc->bge_cdata.bge_rx_return_ring_map);
2200
2201 if (sc->bge_cdata.bge_rx_return_ring_map &&
2202 sc->bge_ldata.bge_rx_return_ring)
2203 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2204 sc->bge_ldata.bge_rx_return_ring,
2205 sc->bge_cdata.bge_rx_return_ring_map);
2206
2207 if (sc->bge_cdata.bge_rx_return_ring_tag)
2208 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2209
2210 /* Destroy TX ring. */
2211 if (sc->bge_cdata.bge_tx_ring_map)
2212 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2213 sc->bge_cdata.bge_tx_ring_map);
2214
2215 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2216 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2217 sc->bge_ldata.bge_tx_ring,
2218 sc->bge_cdata.bge_tx_ring_map);
2219
2220 if (sc->bge_cdata.bge_tx_ring_tag)
2221 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2222
2223 /* Destroy status block. */
2224 if (sc->bge_cdata.bge_status_map)
2225 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2226 sc->bge_cdata.bge_status_map);
2227
2228 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2229 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2230 sc->bge_ldata.bge_status_block,
2231 sc->bge_cdata.bge_status_map);
2232
2233 if (sc->bge_cdata.bge_status_tag)
2234 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2235
2236 /* Destroy statistics block. */
2237 if (sc->bge_cdata.bge_stats_map)
2238 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2239 sc->bge_cdata.bge_stats_map);
2240
2241 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2242 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2243 sc->bge_ldata.bge_stats,
2244 sc->bge_cdata.bge_stats_map);
2245
2246 if (sc->bge_cdata.bge_stats_tag)
2247 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2248
2249 if (sc->bge_cdata.bge_buffer_tag)
2250 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2251
2252 /* Destroy the parent tag. */
2253 if (sc->bge_cdata.bge_parent_tag)
2254 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2255}
2256
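/*
 * Allocate, map and load DMA'able memory for one ring. If the allocated
 * memory crosses a 4GB boundary on a controller with the 4GB DMA bug,
 * free it and retry with the DMA address space limited to 32 bits.
 */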
2257static int
2258bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2259 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2260 bus_addr_t *paddr, const char *msg)
2261{
2262 struct bge_dmamap_arg ctx;
2263 bus_addr_t lowaddr;
2264 bus_size_t ring_end;
2265 int error;
2266
2267 lowaddr = BUS_SPACE_MAXADDR;
2268again:
2269 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2270 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2271 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2272 if (error != 0) {
2273 device_printf(sc->bge_dev,
2274 "could not create %s dma tag\n", msg);
2275 return (ENOMEM);
2276 }
2277 /* Allocate DMA'able memory for ring. */
2278 error = bus_dmamem_alloc(*tag, (void **)ring,
2279 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2280 if (error != 0) {
2281 device_printf(sc->bge_dev,
2282 "could not allocate DMA'able memory for %s\n", msg);
2283 return (ENOMEM);
2284 }
2285 /* Load the address of the ring. */
2286 ctx.bge_busaddr = 0;
2287 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2288 &ctx, BUS_DMA_NOWAIT);
2289 if (error != 0) {
2290 device_printf(sc->bge_dev,
2291 "could not load DMA'able memory for %s\n", msg);
2292 return (ENOMEM);
2293 }
2294 *paddr = ctx.bge_busaddr;
2295 ring_end = *paddr + maxsize;
2296 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2297 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2298 /*
2299 * 4GB boundary crossed. Limit maximum allowable DMA
2300 * address space to 32bit and try again.
2301 */
2302 bus_dmamap_unload(*tag, *map);
2303 bus_dmamem_free(*tag, *ring, *map);
2304 bus_dma_tag_destroy(*tag);
2305 if (bootverbose)
2306 device_printf(sc->bge_dev, "4GB boundary crossed, "
2307 "limit DMA address space to 32bit for %s\n", msg);
2308 *ring = NULL;
2309 *tag = NULL;
2310 *map = NULL;
2311 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2312 goto again;
2313 }
2314 return (0);
2315}
2316
2317static int
2318bge_dma_alloc(struct bge_softc *sc)
2319{
2320 bus_addr_t lowaddr;
2321 bus_size_t boundary, sbsz, txsegsz, txmaxsegsz;
2322 int i, error;
2323
2324 lowaddr = BUS_SPACE_MAXADDR;
2325 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2326 lowaddr = BGE_DMA_MAXADDR;
2327 /*
2328 * Allocate the parent bus DMA tag appropriate for PCI.
2329 */
2330 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2331 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2332 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2333 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2334 if (error != 0) {
2335 device_printf(sc->bge_dev,
2336 "could not allocate parent dma tag\n");
2337 return (ENOMEM);
2338 }
2339
2340 /* Create tag for standard RX ring. */
2341 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2342 &sc->bge_cdata.bge_rx_std_ring_tag,
2343 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2344 &sc->bge_cdata.bge_rx_std_ring_map,
2345 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2346 if (error)
2347 return (error);
2348
2349 /* Create tag for RX return ring. */
2350 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2351 &sc->bge_cdata.bge_rx_return_ring_tag,
2352 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2353 &sc->bge_cdata.bge_rx_return_ring_map,
2354 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2355 if (error)
2356 return (error);
2357
2358 /* Create tag for TX ring. */
2359 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2360 &sc->bge_cdata.bge_tx_ring_tag,
2361 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2362 &sc->bge_cdata.bge_tx_ring_map,
2363 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2364 if (error)
2365 return (error);
2366
2367 /*
2368 * Create tag for status block.
2369 * Because we use only a single Tx/Rx/Rx return ring, use the
2370 * minimum status block size, except on BCM5700 AX/BX, which
2371 * seems to want to see the full status block size regardless
2372 * of the configured number of rings.
2373 */
2374 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2375 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2376 sbsz = BGE_STATUS_BLK_SZ;
2377 else
2378 sbsz = 32;
2379 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2380 &sc->bge_cdata.bge_status_tag,
2381 (uint8_t **)&sc->bge_ldata.bge_status_block,
2382 &sc->bge_cdata.bge_status_map,
2383 &sc->bge_ldata.bge_status_block_paddr, "status block");
2384 if (error)
2385 return (error);
2386
2387 /* Create tag for statistics block. */
2388 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2389 &sc->bge_cdata.bge_stats_tag,
2390 (uint8_t **)&sc->bge_ldata.bge_stats,
2391 &sc->bge_cdata.bge_stats_map,
2392 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2393 if (error)
2394 return (error);
2395
2396 /* Create tag for jumbo RX ring. */
2397 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2398 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2399 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2400 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2401 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2402 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2403 if (error)
2404 return (error);
2405 }
2406
2407 /* Create parent tag for buffers. */
2408 boundary = 0;
2409 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0)
2410 boundary = BGE_DMA_BNDRY;
2411 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2412 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2413 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2414 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2415 if (error != 0) {
2416 device_printf(sc->bge_dev,
2417 "could not allocate buffer dma tag\n");
2418 return (ENOMEM);
2419 }
2420 /* Create tag for Tx mbufs. */
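/*
 * With TSO a single transmit request may cover a full 64KB IP
 * datagram plus the Ethernet/VLAN header, so the tag needs a
 * correspondingly larger maximum mapping size.
 */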
2421 if (sc->bge_flags & BGE_FLAG_TSO) {
2422 txsegsz = BGE_TSOSEG_SZ;
2423 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2424 } else {
2425 txsegsz = MCLBYTES;
2426 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2427 }
2428 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2429 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2430 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2431 &sc->bge_cdata.bge_tx_mtag);
2432
2433 if (error) {
2434 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2435 return (ENOMEM);
2436 }
2437
2438 /* Create tag for Rx mbufs. */
2439 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2440 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
2441 MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2442
2443 if (error) {
2444 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2445 return (ENOMEM);
2446 }
2447
2448 /* Create DMA maps for RX buffers. */
2449 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2450 &sc->bge_cdata.bge_rx_std_sparemap);
2451 if (error) {
2452 device_printf(sc->bge_dev,
2453 "can't create spare DMA map for RX\n");
2454 return (ENOMEM);
2455 }
2456 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2457 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2458 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2459 if (error) {
2460 device_printf(sc->bge_dev,
2461 "can't create DMA map for RX\n");
2462 return (ENOMEM);
2463 }
2464 }
2465
2466 /* Create DMA maps for TX buffers. */
2467 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2468 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2469 &sc->bge_cdata.bge_tx_dmamap[i]);
2470 if (error) {
2471 device_printf(sc->bge_dev,
2472 "can't create DMA map for TX\n");
2473 return (ENOMEM);
2474 }
2475 }
2476
2477 /* Create tags for jumbo RX buffers. */
2478 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2479 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2480 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2481 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2482 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2483 if (error) {
2484 device_printf(sc->bge_dev,
2485 "could not allocate jumbo dma tag\n");
2486 return (ENOMEM);
2487 }
2488 /* Create DMA maps for jumbo RX buffers. */
2489 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2490 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2491 if (error) {
2492 device_printf(sc->bge_dev,
2493 "can't create spare DMA map for jumbo RX\n");
2494 return (ENOMEM);
2495 }
2496 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2497 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2498 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2499 if (error) {
2500 device_printf(sc->bge_dev,
2501 "can't create DMA map for jumbo RX\n");
2502 return (ENOMEM);
2503 }
2504 }
2505 }
2506
2507 return (0);
2508}
2509
2510/*
2511 * Return true if this device has more than one port.
2512 */
2513static int
2514bge_has_multiple_ports(struct bge_softc *sc)
2515{
2516 device_t dev = sc->bge_dev;
2517 u_int b, d, f, fscan, s;
2518
2519 d = pci_get_domain(dev);
2520 b = pci_get_bus(dev);
2521 s = pci_get_slot(dev);
2522 f = pci_get_function(dev);
2523 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2524 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2525 return (1);
2526 return (0);
2527}
2528
2529/*
2530 * Return true if MSI can be used with this device.
2531 */
2532static int
2533bge_can_use_msi(struct bge_softc *sc)
2534{
2535 int can_use_msi = 0;
2536
2537 switch (sc->bge_asicrev) {
2538 case BGE_ASICREV_BCM5714_A0:
2539 case BGE_ASICREV_BCM5714:
2540 /*
2541 * Apparently, MSI doesn't work when these chips are
2542 * configured in single-port mode.
2543 */
2544 if (bge_has_multiple_ports(sc))
2545 can_use_msi = 1;
2546 break;
2547 case BGE_ASICREV_BCM5750:
2548 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2549 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2550 can_use_msi = 1;
2551 break;
2552 default:
2553 if (BGE_IS_575X_PLUS(sc))
2554 can_use_msi = 1;
2555 }
2556 return (can_use_msi);
2557}
2558
2559static int
2560bge_attach(device_t dev)
2561{
2562 struct ifnet *ifp;
2563 struct bge_softc *sc;
2564 uint32_t hwcfg = 0, misccfg;
2565 u_char eaddr[ETHER_ADDR_LEN];
2566 int error, msicount, reg, rid, trys;
2567
2568 sc = device_get_softc(dev);
2569 sc->bge_dev = dev;
2570
2571 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2572
2573 /*
2574 * Map control/status registers.
2575 */
2576 pci_enable_busmaster(dev);
2577
2578 rid = PCIR_BAR(0);
2579 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2580 RF_ACTIVE);
2581
2582 if (sc->bge_res == NULL) {
2583 device_printf (sc->bge_dev, "couldn't map memory\n");
2584 error = ENXIO;
2585 goto fail;
2586 }
2587
2588 /* Save various chip information. */
2589 sc->bge_chipid =
2590 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2591 BGE_PCIMISCCTL_ASICREV_SHIFT;
2592 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG)
2593 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV,
2594 4);
2595 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2596 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2597
2598 /* Set default PHY address. */
2599 sc->bge_phy_addr = 1;
2600
2601 /*
2602 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2603 * 5705 A0 and A1 chips.
2604 */
2605 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2606 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2607 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2608 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
2609 sc->bge_phy_flags |= BGE_PHY_WIRESPEED;
2610
2611 if (bge_has_eaddr(sc))
2612 sc->bge_flags |= BGE_FLAG_EADDR;
2613
2614 /* Save chipset family. */
2615 switch (sc->bge_asicrev) {
2616 case BGE_ASICREV_BCM5755:
2617 case BGE_ASICREV_BCM5761:
2618 case BGE_ASICREV_BCM5784:
2619 case BGE_ASICREV_BCM5785:
2620 case BGE_ASICREV_BCM5787:
2621 case BGE_ASICREV_BCM57780:
2622 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2623 BGE_FLAG_5705_PLUS;
2624 break;
2625 case BGE_ASICREV_BCM5700:
2626 case BGE_ASICREV_BCM5701:
2627 case BGE_ASICREV_BCM5703:
2628 case BGE_ASICREV_BCM5704:
2629 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2630 break;
2631 case BGE_ASICREV_BCM5714_A0:
2632 case BGE_ASICREV_BCM5780:
2633 case BGE_ASICREV_BCM5714:
2634 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2635 /* FALLTHROUGH */
2636 case BGE_ASICREV_BCM5750:
2637 case BGE_ASICREV_BCM5752:
2638 case BGE_ASICREV_BCM5906:
2639 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2640 /* FALLTHROUGH */
2641 case BGE_ASICREV_BCM5705:
2642 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2643 break;
2644 }
2645
2646 /* Set various PHY bug flags. */
2647 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2648 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2649 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2650 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2651 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2652 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2653 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2654 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2655 if (pci_get_subvendor(dev) == DELL_VENDORID)
2656 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2657 if ((BGE_IS_5705_PLUS(sc)) &&
2658 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2659 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2660 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2661 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2662 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2663 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2664 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2665 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2666 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2667 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2668 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2669 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2670 } else
2671 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2672 }
2673
2674 /* Identify the chips that use a CPMU. */
2675 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2676 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2677 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2678 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2679 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2680 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2681 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2682 else
2683 sc->bge_mi_mode = BGE_MIMODE_BASE;
2684 /* Enable auto polling for BCM570[0-5]. */
2685 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2686 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2687
2688 /*
2689 * All controllers that are not 5755 or higher have a 4GB
2690 * boundary DMA bug.
2691 * Whenever an address crosses a multiple of the 4GB boundary
2692 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2693 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2694 * state machine will lock up and cause the device to hang.
2695 */
2696 if (BGE_IS_5755_PLUS(sc) == 0)
2697 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2698
2699 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2700 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2701 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2702 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2703 sc->bge_flags |= BGE_FLAG_5788;
2704 }
2705
2706 /*
2707 * Some controllers seem to require special firmware to use
2708 * TSO. But the firmware is not available to FreeBSD, and Linux
2709 * claims that the TSO performed by the firmware is slower than
2710 * hardware based TSO. Moreover the firmware based TSO has one
2711 * known bug: it can't handle TSO if the ethernet header plus the
2712 * IP/TCP header is greater than 80 bytes. A workaround for the
2713 * TSO bug exists, but it seems to be more expensive than not
2714 * using TSO at all. Some hardware also has the TSO bug, so limit
2715 * TSO to the controllers that are not affected by TSO issues
2716 * (e.g. 5755 or higher).
2717 */
2718 if (BGE_IS_5755_PLUS(sc)) {
2719 /*
2720 * The BCM5754 and BCM5787 share the same ASIC ID, so an
2721 * explicit device ID check is required.
2722 * For an unknown reason TSO does not work on the BCM5755M.
2723 */
2724 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2725 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2726 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
2727 sc->bge_flags |= BGE_FLAG_TSO;
2728 }
2729
2730 /*
2731 * Check if this is a PCI-X or PCI Express device.
2732 */
2733 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2734 /*
2735 * Found a PCI Express capabilities register, this
2736 * must be a PCI Express device.
2737 */
2738 sc->bge_flags |= BGE_FLAG_PCIE;
2739 sc->bge_expcap = reg;
2740 if (pci_get_max_read_req(dev) != 4096)
2741 pci_set_max_read_req(dev, 4096);
2742 } else {
2743 /*
2744 * Check if the device is in PCI-X Mode.
2745 * (This bit is not valid on PCI Express controllers.)
2746 */
2747 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
2748 sc->bge_pcixcap = reg;
2749 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2750 BGE_PCISTATE_PCI_BUSMODE) == 0)
2751 sc->bge_flags |= BGE_FLAG_PCIX;
2752 }
2753
2754 /*
2755 * The 40bit DMA bug applies to the 5714/5715 controllers and is
2756 * not actually a MAC controller bug but an issue with the embedded
2757 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
2758 */
2759 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2760 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2761 /*
2762 * Allocate the interrupt, using MSI if possible. These devices
2763 * support 8 MSI messages, but only the first one is used in
2764 * normal operation.
2765 */
2766 rid = 0;
2767 if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
2768 sc->bge_msicap = reg;
2769 if (bge_can_use_msi(sc)) {
2770 msicount = pci_msi_count(dev);
2771 if (msicount > 1)
2772 msicount = 1;
2773 } else
2774 msicount = 0;
2775 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2776 rid = 1;
2777 sc->bge_flags |= BGE_FLAG_MSI;
2778 }
2779 }
2780
2781 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2782 RF_SHAREABLE | RF_ACTIVE);
2783
2784 if (sc->bge_irq == NULL) {
2785 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2786 error = ENXIO;
2787 goto fail;
2788 }
2789
2790 device_printf(dev,
2791 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2792 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2793 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2794 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2795
2796 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2797
2798 /* Try to reset the chip. */
2799 if (bge_reset(sc)) {
2800 device_printf(sc->bge_dev, "chip reset failed\n");
2801 error = ENXIO;
2802 goto fail;
2803 }
2804
2805 sc->bge_asf_mode = 0;
2806 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2807 == BGE_MAGIC_NUMBER)) {
2808 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2809 & BGE_HWCFG_ASF) {
2810 sc->bge_asf_mode |= ASF_ENABLE;
2811 sc->bge_asf_mode |= ASF_STACKUP;
2812 if (BGE_IS_575X_PLUS(sc))
2813 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2814 }
2815 }
2816
2817 /* Try to reset the chip again the nice way. */
2818 bge_stop_fw(sc);
2819 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2820 if (bge_reset(sc)) {
2821 device_printf(sc->bge_dev, "chip reset failed\n");
2822 error = ENXIO;
2823 goto fail;
2824 }
2825
2826 bge_sig_legacy(sc, BGE_RESET_STOP);
2827 bge_sig_post_reset(sc, BGE_RESET_STOP);
2828
2829 if (bge_chipinit(sc)) {
2830 device_printf(sc->bge_dev, "chip initialization failed\n");
2831 error = ENXIO;
2832 goto fail;
2833 }
2834
2835 error = bge_get_eaddr(sc, eaddr);
2836 if (error) {
2837 device_printf(sc->bge_dev,
2838 "failed to read station address\n");
2839 error = ENXIO;
2840 goto fail;
2841 }
2842
2843 /* 5705 limits RX return ring to 512 entries. */
2844 if (BGE_IS_5705_PLUS(sc))
2845 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2846 else
2847 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2848
2849 if (bge_dma_alloc(sc)) {
2850 device_printf(sc->bge_dev,
2851 "failed to allocate DMA resources\n");
2852 error = ENXIO;
2853 goto fail;
2854 }
2855
2856 bge_add_sysctls(sc);
2857
2858 /* Set default tuneable values. */
2859 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2860 sc->bge_rx_coal_ticks = 150;
2861 sc->bge_tx_coal_ticks = 150;
2862 sc->bge_rx_max_coal_bds = 10;
2863 sc->bge_tx_max_coal_bds = 10;
2864
2865 /* Initialize checksum features to use. */
2866 sc->bge_csum_features = BGE_CSUM_FEATURES;
2867 if (sc->bge_forced_udpcsum != 0)
2868 sc->bge_csum_features |= CSUM_UDP;
2869
2870 /* Set up ifnet structure */
2871 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2872 if (ifp == NULL) {
2873 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2874 error = ENXIO;
2875 goto fail;
2876 }
2877 ifp->if_softc = sc;
2878 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2879 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2880 ifp->if_ioctl = bge_ioctl;
2881 ifp->if_start = bge_start;
2882 ifp->if_init = bge_init;
2883 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2884 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2885 IFQ_SET_READY(&ifp->if_snd);
2886 ifp->if_hwassist = sc->bge_csum_features;
2887 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2888 IFCAP_VLAN_MTU;
2889 if ((sc->bge_flags & BGE_FLAG_TSO) != 0) {
2890 ifp->if_hwassist |= CSUM_TSO;
2891 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
2892 }
2893#ifdef IFCAP_VLAN_HWCSUM
2894 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2895#endif
2896 ifp->if_capenable = ifp->if_capabilities;
2897#ifdef DEVICE_POLLING
2898 ifp->if_capabilities |= IFCAP_POLLING;
2899#endif
2900
2901 /*
2902 * 5700 B0 chips do not support checksumming correctly due
2903 * to hardware bugs.
2904 */
2905 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2906 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2907 ifp->if_capenable &= ~IFCAP_HWCSUM;
2908 ifp->if_hwassist = 0;
2909 }
2910
2911 /*
2912 * Figure out what sort of media we have by checking the
2913 * hardware config word in the first 32k of NIC internal memory,
2914 * or fall back to examining the EEPROM if necessary.
2915 * Note: on some BCM5700 cards, this value appears to be unset.
2916 * If that's the case, we have to rely on identifying the NIC
2917 * by its PCI subsystem ID, as we do below for the SysKonnect
2918 * SK-9D41.
2919 */
2920 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2921 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2922 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
2923 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2924 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2925 sizeof(hwcfg))) {
2926 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2927 error = ENXIO;
2928 goto fail;
2929 }
2930 hwcfg = ntohl(hwcfg);
2931 }
2932
2933 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2934 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
2935 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2936 if (BGE_IS_5714_FAMILY(sc))
2937 sc->bge_flags |= BGE_FLAG_MII_SERDES;
2938 else
2939 sc->bge_flags |= BGE_FLAG_TBI;
2940 }
2941
2942 if (sc->bge_flags & BGE_FLAG_TBI) {
2943 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2944 bge_ifmedia_sts);
2945 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
2946 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2947 0, NULL);
2948 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
2949 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2950 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2951 } else {
2952 /*
2953 * Do transceiver setup and tell the firmware the
2954 * driver is down so we can try to get access to the
2955 * PHY for the probe if ASF is running. Retry a couple
2956 * of times if we get a conflict with the ASF firmware
2957 * accessing the PHY.
2958 */
2959 trys = 0;
2960 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2961again:
2962 bge_asf_driver_up(sc);
2963
2964 if (mii_phy_probe(dev, &sc->bge_miibus,
2965 bge_ifmedia_upd, bge_ifmedia_sts)) {
2966 if (trys++ < 4) {
2967 device_printf(sc->bge_dev, "Try again\n");
2968 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
2969 BMCR_RESET);
2970 goto again;
2971 }
2972
2973 device_printf(sc->bge_dev, "MII without any PHY!\n");
2974 error = ENXIO;
2975 goto fail;
2976 }
2977
2978 /*
2979 * Now tell the firmware we are going up after probing the PHY
2980 */
2981 if (sc->bge_asf_mode & ASF_STACKUP)
2982 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2983 }
2984
2985 /*
2986 * When using the BCM5701 in PCI-X mode, data corruption has
2987 * been observed in the first few bytes of some received packets.
2988 * Aligning the packet buffer in memory eliminates the corruption.
2989 * Unfortunately, this misaligns the packet payloads. On platforms
2990 * which do not support unaligned accesses, we will realign the
2991 * payloads by copying the received packets.
2992 */
2993 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2994 sc->bge_flags & BGE_FLAG_PCIX)
2995 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2996
2997 /*
2998 * Call MI attach routine.
2999 */
3000 ether_ifattach(ifp, eaddr);
3001 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3002
3003 /* Tell upper layer we support long frames. */
3004 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3005
3006 /*
3007 * Hookup IRQ last.
3008 */
3009#if __FreeBSD_version > 700030
3010 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3011 /* Take advantage of single-shot MSI. */
3012 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3013 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3014 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3015 taskqueue_thread_enqueue, &sc->bge_tq);
3016 if (sc->bge_tq == NULL) {
3017 device_printf(dev, "could not create taskqueue.\n");
3018 ether_ifdetach(ifp);
3019 error = ENXIO;
3020 goto fail;
3021 }
3022 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3023 device_get_nameunit(sc->bge_dev));
3024 error = bus_setup_intr(dev, sc->bge_irq,
3025 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3026 &sc->bge_intrhand);
3027 if (error)
3028 ether_ifdetach(ifp);
3029 } else
3030 error = bus_setup_intr(dev, sc->bge_irq,
3031 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3032 &sc->bge_intrhand);
3033#else
3034 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
3035 bge_intr, sc, &sc->bge_intrhand);
3036#endif
3037
3038 if (error) {
3039 bge_detach(dev);
3040 device_printf(sc->bge_dev, "couldn't set up irq\n");
3041 }
3042
3043 return (0);
3044
3045fail:
3046 bge_release_resources(sc);
3047
3048 return (error);
3049}
3050
3051static int
3052bge_detach(device_t dev)
3053{
3054 struct bge_softc *sc;
3055 struct ifnet *ifp;
3056
3057 sc = device_get_softc(dev);
3058 ifp = sc->bge_ifp;
3059
3060#ifdef DEVICE_POLLING
3061 if (ifp->if_capenable & IFCAP_POLLING)
3062 ether_poll_deregister(ifp);
3063#endif
3064
3065 BGE_LOCK(sc);
3066 bge_stop(sc);
3067 bge_reset(sc);
3068 BGE_UNLOCK(sc);
3069
3070 callout_drain(&sc->bge_stat_ch);
3071
3072 if (sc->bge_tq)
3073 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3074 ether_ifdetach(ifp);
3075
3076 if (sc->bge_flags & BGE_FLAG_TBI) {
3077 ifmedia_removeall(&sc->bge_ifmedia);
3078 } else {
3079 bus_generic_detach(dev);
3080 device_delete_child(dev, sc->bge_miibus);
3081 }
3082
3083 bge_release_resources(sc);
3084
3085 return (0);
3086}
3087
3088static void
3089bge_release_resources(struct bge_softc *sc)
3090{
3091 device_t dev;
3092
3093 dev = sc->bge_dev;
3094
3095 if (sc->bge_tq != NULL)
3096 taskqueue_free(sc->bge_tq);
3097
3098 if (sc->bge_intrhand != NULL)
3099 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3100
3101 if (sc->bge_irq != NULL)
3102 bus_release_resource(dev, SYS_RES_IRQ,
3103 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3104
3105 if (sc->bge_flags & BGE_FLAG_MSI)
3106 pci_release_msi(dev);
3107
3108 if (sc->bge_res != NULL)
3109 bus_release_resource(dev, SYS_RES_MEMORY,
3110 PCIR_BAR(0), sc->bge_res);
3111
3112 if (sc->bge_ifp != NULL)
3113 if_free(sc->bge_ifp);
3114
3115 bge_dma_free(sc);
3116
3117 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3118 BGE_LOCK_DESTROY(sc);
3119}
3120
3121static int
3122bge_reset(struct bge_softc *sc)
3123{
3124 device_t dev;
3125 uint32_t cachesize, command, pcistate, reset, val;
3126 void (*write_op)(struct bge_softc *, int, int);
3127 uint16_t devctl;
3128 int i;
3129
3130 dev = sc->bge_dev;
3131
3132 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3133 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3134 if (sc->bge_flags & BGE_FLAG_PCIE)
3135 write_op = bge_writemem_direct;
3136 else
3137 write_op = bge_writemem_ind;
3138 } else
3139 write_op = bge_writereg_ind;
3140
3141 /* Save some important PCI state. */
3142 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3143 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3144 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3145
3146 pci_write_config(dev, BGE_PCI_MISC_CTL,
3147 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3148 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3149
3150 /* Disable fastboot on controllers that support it. */
3151 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3152 BGE_IS_5755_PLUS(sc)) {
3153 if (bootverbose)
3154 device_printf(dev, "Disabling fastboot\n");
3155 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3156 }
3157
3158 /*
3159 * Write the magic number to SRAM at offset 0xB50.
3160 * When firmware finishes its initialization it will
3161 * write ~BGE_MAGIC_NUMBER to the same location.
3162 */
3163 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3164
3165 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3166
3167 /* XXX: Broadcom Linux driver. */
3168 if (sc->bge_flags & BGE_FLAG_PCIE) {
3169 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3170 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3171 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3172 /* Prevent PCIE link training during global reset */
3173 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3174 reset |= 1 << 29;
3175 }
3176 }
3177
3178 /*
3179 * Set GPHY Power Down Override to leave GPHY
3180 * powered up in D0 uninitialized.
3181 */
3182 if (BGE_IS_5705_PLUS(sc))
3183 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3184
3185 /* Issue global reset */
3186 write_op(sc, BGE_MISC_CFG, reset);
3187
3188 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3189 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3190 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3191 val | BGE_VCPU_STATUS_DRV_RESET);
3192 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3193 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3194 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3195 }
3196
3197 DELAY(1000);
3198
3199 /* XXX: Broadcom Linux driver. */
3200 if (sc->bge_flags & BGE_FLAG_PCIE) {
3201 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3202 DELAY(500000); /* wait for link training to complete */
3203 val = pci_read_config(dev, 0xC4, 4);
3204 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3205 }
3206 devctl = pci_read_config(dev,
3207 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3208 /* Clear enable no snoop and disable relaxed ordering. */
3209 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3210 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3211 /* Set PCIE max payload size to 128. */
3212 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3213 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3214 devctl, 2);
3215 /* Clear error status. */
3216 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3217 PCIM_EXP_STA_CORRECTABLE_ERROR |
3218 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3219 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3220 }
3221
3222 /* Reset some of the PCI state that got zapped by reset. */
3223 pci_write_config(dev, BGE_PCI_MISC_CTL,
3224 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3225 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3226 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3227 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3228 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3229 /*
3230 * Disable PCI-X relaxed ordering to ensure status block update
3231 * comes first then packet buffer DMA. Otherwise driver may
3232 * read stale status block.
3233 */
3234 if (sc->bge_flags & BGE_FLAG_PCIX) {
3235 devctl = pci_read_config(dev,
3236 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3237 devctl &= ~PCIXM_COMMAND_ERO;
3238 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3239 devctl &= ~PCIXM_COMMAND_MAX_READ;
3240 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3241 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3242 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3243 PCIXM_COMMAND_MAX_READ);
3244 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3245 }
3246 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3247 devctl, 2);
3248 }
3249 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3250 if (BGE_IS_5714_FAMILY(sc)) {
3251 /* This chip disables MSI on reset. */
3252 if (sc->bge_flags & BGE_FLAG_MSI) {
3253 val = pci_read_config(dev,
3254 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3255 pci_write_config(dev,
3256 sc->bge_msicap + PCIR_MSI_CTRL,
3257 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3258 val = CSR_READ_4(sc, BGE_MSI_MODE);
3259 CSR_WRITE_4(sc, BGE_MSI_MODE,
3260 val | BGE_MSIMODE_ENABLE);
3261 }
3262 val = CSR_READ_4(sc, BGE_MARB_MODE);
3263 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3264 } else
3265 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3266
3267 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3268 for (i = 0; i < BGE_TIMEOUT; i++) {
3269 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3270 if (val & BGE_VCPU_STATUS_INIT_DONE)
3271 break;
3272 DELAY(100);
3273 }
3274 if (i == BGE_TIMEOUT) {
3275 device_printf(dev, "reset timed out\n");
3276 return (1);
3277 }
3278 } else {
3279 /*
3280 * Poll until we see the 1's complement of the magic number.
3281 * This indicates that the firmware initialization is complete.
3282 * We expect this to fail if no chip containing the Ethernet
3283 * address is fitted though.
3284 */
3285 for (i = 0; i < BGE_TIMEOUT; i++) {
3286 DELAY(10);
3287 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3288 if (val == ~BGE_MAGIC_NUMBER)
3289 break;
3290 }
3291
3292 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3293 device_printf(dev,
3294 "firmware handshake timed out, found 0x%08x\n",
3295 val);
3296 }
3297
3298 /*
3299 * XXX Wait for the value of the PCISTATE register to
3300 * return to its original pre-reset state. This is a
3301 * fairly good indicator of reset completion. If we don't
3302 * wait for the reset to fully complete, trying to read
3303 * from the device's non-PCI registers may yield garbage
3304 * results.
3305 */
3306 for (i = 0; i < BGE_TIMEOUT; i++) {
3307 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3308 break;
3309 DELAY(10);
3310 }
3311
3312 /* Fix up byte swapping. */
3313 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3314 BGE_MODECTL_BYTESWAP_DATA);
3315
3316 /* Tell the ASF firmware we are up */
3317 if (sc->bge_asf_mode & ASF_STACKUP)
3318 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3319
3320 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3321
3322 /*
3323 * The 5704 in TBI mode apparently needs some special
3324 * adjustment to ensure the SERDES drive level is set
3325 * to 1.2V.
3326 */
3327 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3328 sc->bge_flags & BGE_FLAG_TBI) {
3329 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3330 val = (val & ~0xFFF) | 0x880;
3331 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3332 }
3333
3334 /* XXX: Broadcom Linux driver. */
3335 if (sc->bge_flags & BGE_FLAG_PCIE &&
3336 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3337 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3338 /* Enable Data FIFO protection. */
3339 val = CSR_READ_4(sc, 0x7C00);
3340 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3341 }
3342 DELAY(10000);
3343
3344 return (0);
3345}
3346
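/*
 * Reuse the mbuf already installed in the given standard ring slot:
 * rebuild its RX descriptor from the saved segment length so the buffer
 * can be handed back to the hardware. Used when a replacement buffer
 * cannot be allocated or when the received frame had an error.
 */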
3347static __inline void
3348bge_rxreuse_std(struct bge_softc *sc, int i)
3349{
3350 struct bge_rx_bd *r;
3351
3352 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3353 r->bge_flags = BGE_RXBDFLAG_END;
3354 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3355 r->bge_idx = i;
3356 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3357}
3358
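/*
 * Same as bge_rxreuse_std(), but for the extended descriptors of the
 * jumbo receive ring.
 */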
3359static __inline void
3360bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3361{
3362 struct bge_extrx_bd *r;
3363
3364 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3365 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3366 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3367 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3368 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3369 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3370 r->bge_idx = i;
3371 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3372}
3373
3374/*
3375 * Frame reception handling. This is called if there's a frame
3376 * on the receive return list.
3377 *
3378 * Note: we have to be able to handle two possibilities here:
3379 * 1) the frame is from the jumbo receive ring
3380 * 2) the frame is from the standard receive ring
3381 */
3382
3383static int
3384bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3385{
3386 struct ifnet *ifp;
3387 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3388 uint16_t rx_cons;
3389
3390 rx_cons = sc->bge_rx_saved_considx;
3391
3392 /* Nothing to do. */
3393 if (rx_cons == rx_prod)
3394 return (rx_npkts);
3395
3396 ifp = sc->bge_ifp;
3397
3398 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3399 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3400 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3401 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3402 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3403 (MCLBYTES - ETHER_ALIGN))
3404 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3405 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3406
3407 while (rx_cons != rx_prod) {
3408 struct bge_rx_bd *cur_rx;
3409 uint32_t rxidx;
3410 struct mbuf *m = NULL;
3411 uint16_t vlan_tag = 0;
3412 int have_tag = 0;
3413
3414#ifdef DEVICE_POLLING
3415 if (ifp->if_capenable & IFCAP_POLLING) {
3416 if (sc->rxcycles <= 0)
3417 break;
3418 sc->rxcycles--;
3419 }
3420#endif
3421
3422 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3423
3424 rxidx = cur_rx->bge_idx;
3425 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3426
3427 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3428 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3429 have_tag = 1;
3430 vlan_tag = cur_rx->bge_vlan_tag;
3431 }
3432
3433 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3434 jumbocnt++;
3435 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3436 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3437 bge_rxreuse_jumbo(sc, rxidx);
3438 continue;
3439 }
3440 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3441 bge_rxreuse_jumbo(sc, rxidx);
3442 ifp->if_iqdrops++;
3443 continue;
3444 }
3445 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3446 } else {
3447 stdcnt++;
3448 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3449 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3450 bge_rxreuse_std(sc, rxidx);
3451 continue;
3452 }
3453 if (bge_newbuf_std(sc, rxidx) != 0) {
3454 bge_rxreuse_std(sc, rxidx);
3455 ifp->if_iqdrops++;
3456 continue;
3457 }
3458 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3459 }
3460
3461 ifp->if_ipackets++;
3462#ifndef __NO_STRICT_ALIGNMENT
3463 /*
3464 * For architectures with strict alignment we must make sure
3465 * the payload is aligned.
3466 */
3467 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3468 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3469 cur_rx->bge_len);
3470 m->m_data += ETHER_ALIGN;
3471 }
3472#endif
3473 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3474 m->m_pkthdr.rcvif = ifp;
3475
3476 if (ifp->if_capenable & IFCAP_RXCSUM) {
3477 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3478 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3479 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3480 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3481 }
3482 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3483 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3484 m->m_pkthdr.csum_data =
3485 cur_rx->bge_tcp_udp_csum;
3486 m->m_pkthdr.csum_flags |=
3487 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
3488 }
3489 }
3490
3491 /*
3492 * If we received a packet with a vlan tag,
3493 * attach that information to the packet.
3494 */
3495 if (have_tag) {
3496#if __FreeBSD_version > 700022
3497 m->m_pkthdr.ether_vtag = vlan_tag;
3498 m->m_flags |= M_VLANTAG;
3499#else
3500 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3501 if (m == NULL)
3502 continue;
3503#endif
3504 }
3505
3506 if (holdlck != 0) {
3507 BGE_UNLOCK(sc);
3508 (*ifp->if_input)(ifp, m);
3509 BGE_LOCK(sc);
3510 } else
3511 (*ifp->if_input)(ifp, m);
3512 rx_npkts++;
3513
3514 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3515 return (rx_npkts);
3516 }
3517
3518 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3519 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3520 if (stdcnt > 0)
3521 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3522 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3523
3524 if (jumbocnt > 0)
3525 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3526 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3527
3528 sc->bge_rx_saved_considx = rx_cons;
3529 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3530 if (stdcnt)
3531 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3532 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3533 if (jumbocnt)
3534 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3535 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3536#ifdef notyet
3537 /*
3538 * This register wraps very quickly under heavy packet drops.
3539 * If you need correct statistics, you can enable this check.
3540 */
3541 if (BGE_IS_5705_PLUS(sc))
3542 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3543#endif
3544 return (rx_npkts);
3545}
3546
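/*
 * Transmit completion handling: walk the TX ring from the saved consumer
 * index up to the index reported in the status block, unloading DMA maps
 * and freeing the mbufs of completed frames.
 */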
3547static void
3548bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3549{
3550 struct bge_tx_bd *cur_tx;
3551 struct ifnet *ifp;
3552
3553 BGE_LOCK_ASSERT(sc);
3554
3555 /* Nothing to do. */
3556 if (sc->bge_tx_saved_considx == tx_cons)
3557 return;
3558
3559 ifp = sc->bge_ifp;
3560
3561 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3562 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3563 /*
3564 * Go through our tx ring and free mbufs for those
3565 * frames that have been sent.
3566 */
3567 while (sc->bge_tx_saved_considx != tx_cons) {
3568 uint32_t idx;
3569
3570 idx = sc->bge_tx_saved_considx;
3571 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3572 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3573 ifp->if_opackets++;
3574 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3575 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3576 sc->bge_cdata.bge_tx_dmamap[idx],
3577 BUS_DMASYNC_POSTWRITE);
3578 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3579 sc->bge_cdata.bge_tx_dmamap[idx]);
3580 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3581 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3582 }
3583 sc->bge_txcnt--;
3584 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3585 }
3586
3587 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3588 if (sc->bge_txcnt == 0)
3589 sc->bge_timer = 0;
3590}
3591
3592#ifdef DEVICE_POLLING
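/*
 * Polling entry point: pick up the producer/consumer indexes from the
 * status block and run the RX/TX completion paths without interrupts.
 */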
3593static int
3594bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3595{
3596 struct bge_softc *sc = ifp->if_softc;
3597 uint16_t rx_prod, tx_cons;
3598 uint32_t statusword;
3599 int rx_npkts = 0;
3600
3601 BGE_LOCK(sc);
3602 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3603 BGE_UNLOCK(sc);
3604 return (rx_npkts);
3605 }
3606
3607 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3608 sc->bge_cdata.bge_status_map,
3609 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3610 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3611 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3612
3613 statusword = sc->bge_ldata.bge_status_block->bge_status;
3614 sc->bge_ldata.bge_status_block->bge_status = 0;
3615
3616 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3617 sc->bge_cdata.bge_status_map,
3618 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3619
3620 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3621 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3622 sc->bge_link_evt++;
3623
3624 if (cmd == POLL_AND_CHECK_STATUS)
3625 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3626 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3627 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3628 bge_link_upd(sc);
3629
3630 sc->rxcycles = count;
3631 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3632 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3633 BGE_UNLOCK(sc);
3634 return (rx_npkts);
3635 }
3636 bge_txeof(sc, tx_cons);
3637 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3638 bge_start_locked(ifp);
3639
3640 BGE_UNLOCK(sc);
3641 return (rx_npkts);
3642}
3643#endif /* DEVICE_POLLING */
3644
3645static int
3646bge_msi_intr(void *arg)
3647{
3648 struct bge_softc *sc;
3649
3650 sc = (struct bge_softc *)arg;
3651 /*
3652	 * This interrupt is not shared and the controller has already
3653	 * disabled further interrupts.
3654 */
3655 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3656 return (FILTER_HANDLED);
3657}
3658
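/*
 * Deferred interrupt handler, run from the taskqueue scheduled by
 * bge_msi_intr(): processes link events and RX/TX completions, then
 * re-arms the interrupt through the IRQ0 mailbox.
 */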
3659static void
3660bge_intr_task(void *arg, int pending)
3661{
3662 struct bge_softc *sc;
3663 struct ifnet *ifp;
3664 uint32_t status;
3665 uint16_t rx_prod, tx_cons;
3666
3667 sc = (struct bge_softc *)arg;
3668 ifp = sc->bge_ifp;
3669
3670 BGE_LOCK(sc);
3671 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3672 BGE_UNLOCK(sc);
3673 return;
3674 }
3675
3676 /* Get updated status block. */
3677 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3678 sc->bge_cdata.bge_status_map,
3679 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3680
3681	/* Save producer/consumer indexes. */
3682 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3683 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3684 status = sc->bge_ldata.bge_status_block->bge_status;
3685 sc->bge_ldata.bge_status_block->bge_status = 0;
3686 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3687 sc->bge_cdata.bge_status_map,
3688 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3689
3690 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
3691 bge_link_upd(sc);
3692
3693 /* Let controller work. */
3694 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3695
3696 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3697 sc->bge_rx_saved_considx != rx_prod) {
3698 /* Check RX return ring producer/consumer. */
3699 BGE_UNLOCK(sc);
3700 bge_rxeof(sc, rx_prod, 0);
3701 BGE_LOCK(sc);
3702 }
3703 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3704 /* Check TX ring producer/consumer. */
3705 bge_txeof(sc, tx_cons);
3706 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3707 bge_start_locked(ifp);
3708 }
3709 BGE_UNLOCK(sc);
3710}
3711
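/*
 * Legacy (INTx) interrupt handler.
 */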
3712static void
3713bge_intr(void *xsc)
3714{
3715 struct bge_softc *sc;
3716 struct ifnet *ifp;
3717 uint32_t statusword;
3718 uint16_t rx_prod, tx_cons;
3719
3720 sc = xsc;
3721
3722 BGE_LOCK(sc);
3723
3724 ifp = sc->bge_ifp;
3725
3726#ifdef DEVICE_POLLING
3727 if (ifp->if_capenable & IFCAP_POLLING) {
3728 BGE_UNLOCK(sc);
3729 return;
3730 }
3731#endif
3732
3733 /*
3734 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3735 * disable interrupts by writing nonzero like we used to, since with
3736 * our current organization this just gives complications and
3737 * pessimizations for re-enabling interrupts. We used to have races
3738 * instead of the necessary complications. Disabling interrupts
3739 * would just reduce the chance of a status update while we are
3740 * running (by switching to the interrupt-mode coalescence
3741 * parameters), but this chance is already very low so it is more
3742 * efficient to get another interrupt than prevent it.
3743 *
3744 * We do the ack first to ensure another interrupt if there is a
3745 * status update after the ack. We don't check for the status
3746 * changing later because it is more efficient to get another
3747 * interrupt than prevent it, not quite as above (not checking is
3748 * a smaller optimization than not toggling the interrupt enable,
3749	 * since checking doesn't involve PCI accesses and toggling requires
3750 * the status check). So toggling would probably be a pessimization
3751 * even with MSI. It would only be needed for using a task queue.
3752 */
3753 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3754
3755 /*
3756 * Do the mandatory PCI flush as well as get the link status.
3757 */
3758 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3759
3760 /* Make sure the descriptor ring indexes are coherent. */
3761 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3762 sc->bge_cdata.bge_status_map,
3763 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3764 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3765 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3766 sc->bge_ldata.bge_status_block->bge_status = 0;
3767 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3768 sc->bge_cdata.bge_status_map,
3769 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3770
3771 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3772 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3773 statusword || sc->bge_link_evt)
3774 bge_link_upd(sc);
3775
3776 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3777 /* Check RX return ring producer/consumer. */
3778 bge_rxeof(sc, rx_prod, 1);
3779 }
3780
3781 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3782 /* Check TX ring producer/consumer. */
3783 bge_txeof(sc, tx_cons);
3784 }
3785
3786 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3787 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3788 bge_start_locked(ifp);
3789
3790 BGE_UNLOCK(sc);
3791}
3792
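/*
 * Periodically tell the ASF/IPMI firmware that the driver is still alive
 * by posting a heartbeat event through the software gencomm area.
 */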
3793static void
3794bge_asf_driver_up(struct bge_softc *sc)
3795{
3796 if (sc->bge_asf_mode & ASF_STACKUP) {
3797		/* Send the ASF heartbeat approx. every 2 seconds. */
3798 if (sc->bge_asf_count)
3799			sc->bge_asf_count--;
3800 else {
3801 sc->bge_asf_count = 2;
3802 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3803 BGE_FW_DRV_ALIVE);
3804 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3805 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3806 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3807 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3808 }
3809 }
3810}
3811
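/*
 * Once-per-second timer: update statistics, tick the PHY (or poll the
 * link manually in TBI mode), send the ASF heartbeat and kick the
 * watchdog.
 */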
3812static void
3813bge_tick(void *xsc)
3814{
3815 struct bge_softc *sc = xsc;
3816 struct mii_data *mii = NULL;
3817
3818 BGE_LOCK_ASSERT(sc);
3819
3820 /* Synchronize with possible callout reset/stop. */
3821 if (callout_pending(&sc->bge_stat_ch) ||
3822 !callout_active(&sc->bge_stat_ch))
3823 return;
3824
3825 if (BGE_IS_5705_PLUS(sc))
3826 bge_stats_update_regs(sc);
3827 else
3828 bge_stats_update(sc);
3829
3830 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3831 mii = device_get_softc(sc->bge_miibus);
3832 /*
3833 * Do not touch PHY if we have link up. This could break
3834 * IPMI/ASF mode or produce extra input errors
3835		 * (extra errors were reported for bcm5701 & bcm5704).
3836 */
3837 if (!sc->bge_link)
3838 mii_tick(mii);
3839 } else {
3840 /*
3841		 * Since auto-polling can't be used in TBI mode, we have to poll
3842		 * the link status manually. Here we register a pending link event
3843		 * and trigger an interrupt.
3844 */
3845#ifdef DEVICE_POLLING
3846 /* In polling mode we poll link state in bge_poll(). */
3847 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3848#endif
3849 {
3850 sc->bge_link_evt++;
3851 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3852 sc->bge_flags & BGE_FLAG_5788)
3853 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3854 else
3855 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3856 }
3857 }
3858
3859 bge_asf_driver_up(sc);
3860 bge_watchdog(sc);
3861
3862 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3863}
3864
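/*
 * Update statistics by reading the individual MAC/RX statistics
 * registers; used on 5705 and newer controllers (see bge_tick()).
 */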
3865static void
3866bge_stats_update_regs(struct bge_softc *sc)
3867{
3868 struct ifnet *ifp;
3869 struct bge_mac_stats *stats;
3870
3871 ifp = sc->bge_ifp;
3872 stats = &sc->bge_mac_stats;
3873
3874 stats->ifHCOutOctets +=
3875 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
3876 stats->etherStatsCollisions +=
3877 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
3878 stats->outXonSent +=
3879 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
3880 stats->outXoffSent +=
3881 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
3882 stats->dot3StatsInternalMacTransmitErrors +=
3883 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
3884 stats->dot3StatsSingleCollisionFrames +=
3885 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
3886 stats->dot3StatsMultipleCollisionFrames +=
3887 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
3888 stats->dot3StatsDeferredTransmissions +=
3889 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
3890 stats->dot3StatsExcessiveCollisions +=
3891 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
3892 stats->dot3StatsLateCollisions +=
3893 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
3894 stats->ifHCOutUcastPkts +=
3895 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
3896 stats->ifHCOutMulticastPkts +=
3897 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
3898 stats->ifHCOutBroadcastPkts +=
3899 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
3900
3901 stats->ifHCInOctets +=
3902 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
3903 stats->etherStatsFragments +=
3904 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
3905 stats->ifHCInUcastPkts +=
3906 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
3907 stats->ifHCInMulticastPkts +=
3908 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
3909 stats->ifHCInBroadcastPkts +=
3910 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
3911 stats->dot3StatsFCSErrors +=
3912 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
3913 stats->dot3StatsAlignmentErrors +=
3914 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
3915 stats->xonPauseFramesReceived +=
3916 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
3917 stats->xoffPauseFramesReceived +=
3918 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
3919 stats->macControlFramesReceived +=
3920 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
3921 stats->xoffStateEntered +=
3922 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
3923 stats->dot3StatsFramesTooLong +=
3924 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
3925 stats->etherStatsJabbers +=
3926 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
3927 stats->etherStatsUndersizePkts +=
3928 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
3929
3930 stats->FramesDroppedDueToFilters +=
3931 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
3932 stats->DmaWriteQueueFull +=
3933 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
3934 stats->DmaWriteHighPriQueueFull +=
3935 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
3936 stats->NoMoreRxBDs +=
3937 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3938 stats->InputDiscards +=
3939 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3940 stats->InputErrors +=
3941 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3942 stats->RecvThresholdHit +=
3943 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
3944
3945 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
3946 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
3947 stats->InputErrors);
3948}
3949
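/*
 * Reset the per-register statistics by reading every counter; the
 * counters are assumed to be clear-on-read here.
 */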
3950static void
3951bge_stats_clear_regs(struct bge_softc *sc)
3952{
3953
3954 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
3955 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
3956 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
3957 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
3958 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
3959 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
3960 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
3961 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
3962 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
3963 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
3964 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
3965 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
3966 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
3967
3968 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
3969 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
3970 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
3971 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
3972 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
3973 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
3974 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
3975 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
3976 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
3977 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
3978 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
3979 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
3980 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
3981 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
3982
3983 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
3984 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
3985 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
3986 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3987 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3988 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3989 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
3990}
3991
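/*
 * Update statistics from the on-chip statistics block, accessed through
 * the memory window; used on pre-5705 controllers.
 */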
3992static void
3993bge_stats_update(struct bge_softc *sc)
3994{
3995 struct ifnet *ifp;
3996 bus_size_t stats;
3997 uint32_t cnt; /* current register value */
3998
3999 ifp = sc->bge_ifp;
4000
4001 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4002
4003#define READ_STAT(sc, stats, stat) \
4004 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4005
4006 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4007 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4008 sc->bge_tx_collisions = cnt;
4009
4010 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4011 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4012 sc->bge_rx_discards = cnt;
4013
4014 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4015 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4016 sc->bge_tx_discards = cnt;
4017
4018#undef READ_STAT
4019}
4020
4021/*
4022 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4023 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4024 * but when such padded frames employ the bge IP/TCP checksum offload,
4025 * the hardware checksum assist gives incorrect results (possibly
4026 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4027 * If we pad such runts with zeros, the onboard checksum comes out correct.
4028 */
4029static __inline int
4030bge_cksum_pad(struct mbuf *m)
4031{
4032 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4033 struct mbuf *last;
4034
4035 /* If there's only the packet-header and we can pad there, use it. */
4036 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4037 M_TRAILINGSPACE(m) >= padlen) {
4038 last = m;
4039 } else {
4040 /*
4041 * Walk packet chain to find last mbuf. We will either
4042 * pad there, or append a new mbuf and pad it.
4043 */
4044 for (last = m; last->m_next != NULL; last = last->m_next);
4045 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4046 /* Allocate new empty mbuf, pad it. Compact later. */
4047 struct mbuf *n;
4048
4049 MGET(n, M_DONTWAIT, MT_DATA);
4050 if (n == NULL)
4051 return (ENOBUFS);
4052 n->m_len = 0;
4053 last->m_next = n;
4054 last = n;
4055 }
4056 }
4057
4058 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4059 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4060 last->m_len += padlen;
4061 m->m_pkthdr.len += padlen;
4062
4063 return (0);
4064}
4065
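/*
 * Prepare an mbuf chain for TSO: make the headers writable and
 * contiguous, locate the TCP header and hand the MSS back to the caller.
 */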
4066static struct mbuf *
4067bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss)
4068{
4069 struct ip *ip;
4070 struct tcphdr *tcp;
4071 struct mbuf *n;
4072 uint16_t hlen;
4073 uint32_t poff;
4074
4075 if (M_WRITABLE(m) == 0) {
4076 /* Get a writable copy. */
4077 n = m_dup(m, M_DONTWAIT);
4078 m_freem(m);
4079 if (n == NULL)
4080 return (NULL);
4081 m = n;
4082 }
4083 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4084 if (m == NULL)
4085 return (NULL);
4086 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4087 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4088 m = m_pullup(m, poff + sizeof(struct tcphdr));
4089 if (m == NULL)
4090 return (NULL);
4091 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4092 m = m_pullup(m, poff + (tcp->th_off << 2));
4093 if (m == NULL)
4094 return (NULL);
4095 /*
4096	 * It seems the controller doesn't modify the IP length and TCP pseudo
4097	 * checksum. These checksums, computed by the upper stack, should be 0.
4098 */
4099 *mss = m->m_pkthdr.tso_segsz;
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
218
219 { SK_VENDORID, SK_DEVICEID_ALTIMA },
220
221 { TC_VENDORID, TC_DEVICEID_3C996 },
222
223 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
224 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
225 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
226
227 { 0, 0 }
228};
229
230static const struct bge_vendor {
231 uint16_t v_id;
232 const char *v_name;
233} bge_vendors[] = {
234 { ALTEON_VENDORID, "Alteon" },
235 { ALTIMA_VENDORID, "Altima" },
236 { APPLE_VENDORID, "Apple" },
237 { BCOM_VENDORID, "Broadcom" },
238 { SK_VENDORID, "SysKonnect" },
239 { TC_VENDORID, "3Com" },
240 { FJTSU_VENDORID, "Fujitsu" },
241
242 { 0, NULL }
243};
244
245static const struct bge_revision {
246 uint32_t br_chipid;
247 const char *br_name;
248} bge_revisions[] = {
249 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
250 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
251 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
252 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
253 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
254 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
255 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
256 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
257 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
258 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
259 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
260 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
261 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
262 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
263 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
264 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
265 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
266 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
267 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
268 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
269 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
270 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
271 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
272 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
273 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
274 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
275 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
276 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
277 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
278 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
279 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
280 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
281 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
282 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
283 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
284 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
285 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
286 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
287 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
288 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
289 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
290 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
291 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
292 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
293 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
294 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
295 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
296 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
297 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
298 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
299 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
300 /* 5754 and 5787 share the same ASIC ID */
301 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
302 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
303 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
304 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
305 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
306 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
307 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
308
309 { 0, NULL }
310};
311
312/*
313 * Some defaults for major revisions, so that newer steppings
314 * that we don't know about have a shot at working.
315 */
316static const struct bge_revision bge_majorrevs[] = {
317 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
318 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
319 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
320 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
321 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
322 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
323 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
324 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
325 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
326 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
327 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
328 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
329 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
330 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
331 /* 5754 and 5787 share the same ASIC ID */
332 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
333 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
334 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
335
336 { 0, NULL }
337};
338
339#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
340#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
341#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
342#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
343#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
344#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
345
346const struct bge_revision * bge_lookup_rev(uint32_t);
347const struct bge_vendor * bge_lookup_vendor(uint16_t);
348
349typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
350
351static int bge_probe(device_t);
352static int bge_attach(device_t);
353static int bge_detach(device_t);
354static int bge_suspend(device_t);
355static int bge_resume(device_t);
356static void bge_release_resources(struct bge_softc *);
357static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
358static int bge_dma_alloc(struct bge_softc *);
359static void bge_dma_free(struct bge_softc *);
360static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
361 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
362
363static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
364static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
365static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
366static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
367static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
368
369static void bge_txeof(struct bge_softc *, uint16_t);
370static int bge_rxeof(struct bge_softc *, uint16_t, int);
371
372static void bge_asf_driver_up (struct bge_softc *);
373static void bge_tick(void *);
374static void bge_stats_clear_regs(struct bge_softc *);
375static void bge_stats_update(struct bge_softc *);
376static void bge_stats_update_regs(struct bge_softc *);
377static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
378 uint16_t *);
379static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
380
381static void bge_intr(void *);
382static int bge_msi_intr(void *);
383static void bge_intr_task(void *, int);
384static void bge_start_locked(struct ifnet *);
385static void bge_start(struct ifnet *);
386static int bge_ioctl(struct ifnet *, u_long, caddr_t);
387static void bge_init_locked(struct bge_softc *);
388static void bge_init(void *);
389static void bge_stop(struct bge_softc *);
390static void bge_watchdog(struct bge_softc *);
391static int bge_shutdown(device_t);
392static int bge_ifmedia_upd_locked(struct ifnet *);
393static int bge_ifmedia_upd(struct ifnet *);
394static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
395
396static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
397static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
398
399static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
400static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
401
402static void bge_setpromisc(struct bge_softc *);
403static void bge_setmulti(struct bge_softc *);
404static void bge_setvlan(struct bge_softc *);
405
406static __inline void bge_rxreuse_std(struct bge_softc *, int);
407static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
408static int bge_newbuf_std(struct bge_softc *, int);
409static int bge_newbuf_jumbo(struct bge_softc *, int);
410static int bge_init_rx_ring_std(struct bge_softc *);
411static void bge_free_rx_ring_std(struct bge_softc *);
412static int bge_init_rx_ring_jumbo(struct bge_softc *);
413static void bge_free_rx_ring_jumbo(struct bge_softc *);
414static void bge_free_tx_ring(struct bge_softc *);
415static int bge_init_tx_ring(struct bge_softc *);
416
417static int bge_chipinit(struct bge_softc *);
418static int bge_blockinit(struct bge_softc *);
419
420static int bge_has_eaddr(struct bge_softc *);
421static uint32_t bge_readmem_ind(struct bge_softc *, int);
422static void bge_writemem_ind(struct bge_softc *, int, int);
423static void bge_writembx(struct bge_softc *, int, int);
424#ifdef notdef
425static uint32_t bge_readreg_ind(struct bge_softc *, int);
426#endif
427static void bge_writemem_direct(struct bge_softc *, int, int);
428static void bge_writereg_ind(struct bge_softc *, int, int);
429
430static int bge_miibus_readreg(device_t, int, int);
431static int bge_miibus_writereg(device_t, int, int, int);
432static void bge_miibus_statchg(device_t);
433#ifdef DEVICE_POLLING
434static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
435#endif
436
437#define BGE_RESET_START 1
438#define BGE_RESET_STOP 2
439static void bge_sig_post_reset(struct bge_softc *, int);
440static void bge_sig_legacy(struct bge_softc *, int);
441static void bge_sig_pre_reset(struct bge_softc *, int);
442static void bge_stop_fw(struct bge_softc *);
443static int bge_reset(struct bge_softc *);
444static void bge_link_upd(struct bge_softc *);
445
446/*
447 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
448 * leak information to untrusted users. It is also known to cause alignment
449 * traps on certain architectures.
450 */
451#ifdef BGE_REGISTER_DEBUG
452static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
453static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
454static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
455#endif
456static void bge_add_sysctls(struct bge_softc *);
457static void bge_add_sysctl_stats_regs(struct bge_softc *,
458 struct sysctl_ctx_list *, struct sysctl_oid_list *);
459static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
460 struct sysctl_oid_list *);
461static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
462
463static device_method_t bge_methods[] = {
464 /* Device interface */
465 DEVMETHOD(device_probe, bge_probe),
466 DEVMETHOD(device_attach, bge_attach),
467 DEVMETHOD(device_detach, bge_detach),
468 DEVMETHOD(device_shutdown, bge_shutdown),
469 DEVMETHOD(device_suspend, bge_suspend),
470 DEVMETHOD(device_resume, bge_resume),
471
472 /* bus interface */
473 DEVMETHOD(bus_print_child, bus_generic_print_child),
474 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
475
476 /* MII interface */
477 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
478 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
479 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
480
481 { 0, 0 }
482};
483
484static driver_t bge_driver = {
485 "bge",
486 bge_methods,
487 sizeof(struct bge_softc)
488};
489
490static devclass_t bge_devclass;
491
492DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
493DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
494
495static int bge_allow_asf = 1;
496
497TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
498
499SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
500SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
501 "Allow ASF mode if available");
502
503#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
504#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
505#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
506#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
507#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
508
509static int
510bge_has_eaddr(struct bge_softc *sc)
511{
512#ifdef __sparc64__
513 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
514 device_t dev;
515 uint32_t subvendor;
516
517 dev = sc->bge_dev;
518
519 /*
520 * The on-board BGEs found in sun4u machines aren't fitted with
521 * an EEPROM which means that we have to obtain the MAC address
522 * via OFW and that some tests will always fail. We distinguish
523 * such BGEs by the subvendor ID, which also has to be obtained
524 * from OFW instead of the PCI configuration space as the latter
525 * indicates Broadcom as the subvendor of the netboot interface.
526 * For early Blade 1500 and 2500 we even have to check the OFW
527 * device path as the subvendor ID always defaults to Broadcom
528 * there.
529 */
530 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
531 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
532 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
533 return (0);
534 memset(buf, 0, sizeof(buf));
535 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
536 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
537 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
538 return (0);
539 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
540 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
541 return (0);
542 }
543#endif
544 return (1);
545}
546
547static uint32_t
548bge_readmem_ind(struct bge_softc *sc, int off)
549{
550 device_t dev;
551 uint32_t val;
552
553 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
554 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
555 return (0);
556
557 dev = sc->bge_dev;
558
559 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
560 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
561 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
562 return (val);
563}
564
565static void
566bge_writemem_ind(struct bge_softc *sc, int off, int val)
567{
568 device_t dev;
569
570 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
571 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
572 return;
573
574 dev = sc->bge_dev;
575
576 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
577 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
578 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
579}
580
581#ifdef notdef
582static uint32_t
583bge_readreg_ind(struct bge_softc *sc, int off)
584{
585 device_t dev;
586
587 dev = sc->bge_dev;
588
589 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
590 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
591}
592#endif
593
594static void
595bge_writereg_ind(struct bge_softc *sc, int off, int val)
596{
597 device_t dev;
598
599 dev = sc->bge_dev;
600
601 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
602 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
603}
604
605static void
606bge_writemem_direct(struct bge_softc *sc, int off, int val)
607{
608 CSR_WRITE_4(sc, off, val);
609}
610
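/*
 * Write a value to one of the chip's mailbox registers. The BCM5906 maps
 * its mailboxes at a different offset (BGE_LPMBX_*), so rebase the
 * address for that chip.
 */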
611static void
612bge_writembx(struct bge_softc *sc, int off, int val)
613{
614 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
615 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
616
617 CSR_WRITE_4(sc, off, val);
618}
619
620/*
621 * Map a single buffer address.
622 */
623
624static void
625bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
626{
627 struct bge_dmamap_arg *ctx;
628
629 if (error)
630 return;
631
632 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
633
634 ctx = arg;
635 ctx->bge_busaddr = segs->ds_addr;
636}
637
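/*
 * Read a single byte from the controller's NVRAM: acquire the software
 * arbitration lock, enable NVRAM access, issue the read command and
 * extract the requested byte from the 32-bit result.
 */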
638static uint8_t
639bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
640{
641 uint32_t access, byte = 0;
642 int i;
643
644 /* Lock. */
645 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
646 for (i = 0; i < 8000; i++) {
647 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
648 break;
649 DELAY(20);
650 }
651 if (i == 8000)
652 return (1);
653
654 /* Enable access. */
655 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
656 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
657
658 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
659 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
660 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
661 DELAY(10);
662 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
663 DELAY(10);
664 break;
665 }
666 }
667
668 if (i == BGE_TIMEOUT * 10) {
669 if_printf(sc->bge_ifp, "nvram read timed out\n");
670 return (1);
671 }
672
673 /* Get result. */
674 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
675
676 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
677
678 /* Disable access. */
679 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
680
681 /* Unlock. */
682 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
683 CSR_READ_4(sc, BGE_NVRAM_SWARB);
684
685 return (0);
686}
687
688/*
689 * Read a sequence of bytes from NVRAM.
690 */
691static int
692bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
693{
694 int err = 0, i;
695 uint8_t byte = 0;
696
697 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
698 return (1);
699
700 for (i = 0; i < cnt; i++) {
701 err = bge_nvram_getbyte(sc, off + i, &byte);
702 if (err)
703 break;
704 *(dest + i) = byte;
705 }
706
707 return (err ? 1 : 0);
708}
709
710/*
711 * Read a byte of data stored in the EEPROM at address 'addr'. The
712 * BCM570x supports both the traditional bitbang interface and an
713 * auto access interface for reading the EEPROM. We use the auto
714 * access method.
715 */
716static uint8_t
717bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
718{
719 int i;
720 uint32_t byte = 0;
721
722 /*
723 * Enable use of auto EEPROM access so we can avoid
724 * having to use the bitbang method.
725 */
726 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
727
728 /* Reset the EEPROM, load the clock period. */
729 CSR_WRITE_4(sc, BGE_EE_ADDR,
730 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
731 DELAY(20);
732
733 /* Issue the read EEPROM command. */
734 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
735
736 /* Wait for completion */
737	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
738 DELAY(10);
739 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
740 break;
741 }
742
743 if (i == BGE_TIMEOUT * 10) {
744 device_printf(sc->bge_dev, "EEPROM read timed out\n");
745 return (1);
746 }
747
748 /* Get result. */
749 byte = CSR_READ_4(sc, BGE_EE_DATA);
750
751 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
752
753 return (0);
754}
755
756/*
757 * Read a sequence of bytes from the EEPROM.
758 */
759static int
760bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
761{
762 int i, error = 0;
763 uint8_t byte = 0;
764
765 for (i = 0; i < cnt; i++) {
766 error = bge_eeprom_getbyte(sc, off + i, &byte);
767 if (error)
768 break;
769 *(dest + i) = byte;
770 }
771
772 return (error ? 1 : 0);
773}
774
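/*
 * Read a PHY register through the MI communication interface,
 * temporarily disabling autopolling while the access is in flight.
 */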
775static int
776bge_miibus_readreg(device_t dev, int phy, int reg)
777{
778 struct bge_softc *sc;
779 uint32_t val;
780 int i;
781
782 sc = device_get_softc(dev);
783
784 /* Prevent the probe from finding incorrect devices. */
785 if (phy != sc->bge_phy_addr)
786 return (0);
787
788	/* Clear the autopoll bit if set, otherwise it may trigger PCI errors. */
789 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
790 CSR_WRITE_4(sc, BGE_MI_MODE,
791 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
792 DELAY(80);
793 }
794
795 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
796 BGE_MIPHY(phy) | BGE_MIREG(reg));
797
798 /* Poll for the PHY register access to complete. */
799 for (i = 0; i < BGE_TIMEOUT; i++) {
800 DELAY(10);
801 val = CSR_READ_4(sc, BGE_MI_COMM);
802 if ((val & BGE_MICOMM_BUSY) == 0) {
803 DELAY(5);
804 val = CSR_READ_4(sc, BGE_MI_COMM);
805 break;
806 }
807 }
808
809 if (i == BGE_TIMEOUT) {
810 device_printf(sc->bge_dev,
811 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
812 phy, reg, val);
813 val = 0;
814 }
815
816 /* Restore the autopoll bit if necessary. */
817 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
818 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
819 DELAY(80);
820 }
821
822 if (val & BGE_MICOMM_READFAIL)
823 return (0);
824
825 return (val & 0xFFFF);
826}
827
828static int
829bge_miibus_writereg(device_t dev, int phy, int reg, int val)
830{
831 struct bge_softc *sc;
832 int i;
833
834 sc = device_get_softc(dev);
835
836 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
837 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
838 return (0);
839
840	/* Clear the autopoll bit if set, otherwise it may trigger PCI errors. */
841 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
842 CSR_WRITE_4(sc, BGE_MI_MODE,
843 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
844 DELAY(80);
845 }
846
847 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
848 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
849
850 for (i = 0; i < BGE_TIMEOUT; i++) {
851 DELAY(10);
852 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
853 DELAY(5);
854 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
855 break;
856 }
857 }
858
859 /* Restore the autopoll bit if necessary. */
860 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
861 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
862 DELAY(80);
863 }
864
865 if (i == BGE_TIMEOUT)
866 device_printf(sc->bge_dev,
867 "PHY write timed out (phy %d, reg %d, val %d)\n",
868 phy, reg, val);
869
870 return (0);
871}
872
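/*
 * MII status change callback: record the link state and reprogram the
 * MAC for the negotiated port mode, duplex and flow-control settings.
 */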
873static void
874bge_miibus_statchg(device_t dev)
875{
876 struct bge_softc *sc;
877 struct mii_data *mii;
878 sc = device_get_softc(dev);
879 mii = device_get_softc(sc->bge_miibus);
880
881 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
882 (IFM_ACTIVE | IFM_AVALID)) {
883 switch (IFM_SUBTYPE(mii->mii_media_active)) {
884 case IFM_10_T:
885 case IFM_100_TX:
886 sc->bge_link = 1;
887 break;
888 case IFM_1000_T:
889 case IFM_1000_SX:
890 case IFM_2500_SX:
891 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
892 sc->bge_link = 1;
893 else
894 sc->bge_link = 0;
895 break;
896 default:
897 sc->bge_link = 0;
898 break;
899 }
900 } else
901 sc->bge_link = 0;
902 if (sc->bge_link == 0)
903 return;
904 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
905 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
906 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
907 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
908 else
909 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
910
911 if (IFM_OPTIONS(mii->mii_media_active & IFM_FDX) != 0) {
912 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
913 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FLAG1)
914 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
915 else
916 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
917 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FLAG0)
918 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
919 else
920 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
921 } else {
922 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
923 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
924 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
925 }
926}
927
928/*
929 * Initialize a standard receive ring descriptor.
930 */
931static int
932bge_newbuf_std(struct bge_softc *sc, int i)
933{
934 struct mbuf *m;
935 struct bge_rx_bd *r;
936 bus_dma_segment_t segs[1];
937 bus_dmamap_t map;
938 int error, nsegs;
939
940 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
941 if (m == NULL)
942 return (ENOBUFS);
943 m->m_len = m->m_pkthdr.len = MCLBYTES;
944 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
945 m_adj(m, ETHER_ALIGN);
946
947 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
948 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
949 if (error != 0) {
950 m_freem(m);
951 return (error);
952 }
953 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
954 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
955 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
956 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
957 sc->bge_cdata.bge_rx_std_dmamap[i]);
958 }
959 map = sc->bge_cdata.bge_rx_std_dmamap[i];
960 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
961 sc->bge_cdata.bge_rx_std_sparemap = map;
962 sc->bge_cdata.bge_rx_std_chain[i] = m;
963 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
964 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
965 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
966 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
967 r->bge_flags = BGE_RXBDFLAG_END;
968 r->bge_len = segs[0].ds_len;
969 r->bge_idx = i;
970
971 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
972 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
973
974 return (0);
975}
976
977/*
978 * Initialize a jumbo receive ring descriptor. This allocates
979 * a jumbo buffer from the pool managed internally by the driver.
980 */
981static int
982bge_newbuf_jumbo(struct bge_softc *sc, int i)
983{
984 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
985 bus_dmamap_t map;
986 struct bge_extrx_bd *r;
987 struct mbuf *m;
988 int error, nsegs;
989
990 MGETHDR(m, M_DONTWAIT, MT_DATA);
991 if (m == NULL)
992 return (ENOBUFS);
993
994 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
995 if (!(m->m_flags & M_EXT)) {
996 m_freem(m);
997 return (ENOBUFS);
998 }
999 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1000 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1001 m_adj(m, ETHER_ALIGN);
1002
1003 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1004 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1005 if (error != 0) {
1006 m_freem(m);
1007 return (error);
1008 }
1009
1010	if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1011 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1012 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1013 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1014 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1015 }
1016 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1017 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1018 sc->bge_cdata.bge_rx_jumbo_sparemap;
1019 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1020 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1021 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1022 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1023 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1024 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1025
1026 /*
1027 * Fill in the extended RX buffer descriptor.
1028 */
1029 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1030 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1031 r->bge_idx = i;
1032 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
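	/*
	 * The cases below intentionally fall through: a buffer that maps
	 * to N segments fills descriptor slots N-1 down through 0.
	 */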
1033 switch (nsegs) {
1034 case 4:
1035 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1036 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1037 r->bge_len3 = segs[3].ds_len;
1038 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1039 case 3:
1040 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1041 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1042 r->bge_len2 = segs[2].ds_len;
1043 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1044 case 2:
1045 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1046 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1047 r->bge_len1 = segs[1].ds_len;
1048 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1049 case 1:
1050 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1051 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1052 r->bge_len0 = segs[0].ds_len;
1053 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1054 break;
1055 default:
1056 panic("%s: %d segments\n", __func__, nsegs);
1057 }
1058
1059 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1060 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1061
1062 return (0);
1063}
1064
1065static int
1066bge_init_rx_ring_std(struct bge_softc *sc)
1067{
1068 int error, i;
1069
1070 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1071 sc->bge_std = 0;
1072 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1073 if ((error = bge_newbuf_std(sc, i)) != 0)
1074 return (error);
1075 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1076 }
1077
1078 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1079 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1080
1081 sc->bge_std = 0;
1082 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1083
1084 return (0);
1085}
1086
1087static void
1088bge_free_rx_ring_std(struct bge_softc *sc)
1089{
1090 int i;
1091
1092 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1093 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1094 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1095 sc->bge_cdata.bge_rx_std_dmamap[i],
1096 BUS_DMASYNC_POSTREAD);
1097 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1098 sc->bge_cdata.bge_rx_std_dmamap[i]);
1099 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1100 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1101 }
1102 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1103 sizeof(struct bge_rx_bd));
1104 }
1105}
1106
1107static int
1108bge_init_rx_ring_jumbo(struct bge_softc *sc)
1109{
1110 struct bge_rcb *rcb;
1111 int error, i;
1112
1113 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1114 sc->bge_jumbo = 0;
1115 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1116 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1117 return (error);
1118 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1119 }
1120
1121 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1122 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1123
1124 sc->bge_jumbo = 0;
1125
1126 /* Enable the jumbo receive producer ring. */
1127 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1128 rcb->bge_maxlen_flags =
1129 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1130 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1131
1132 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1133
1134 return (0);
1135}
1136
1137static void
1138bge_free_rx_ring_jumbo(struct bge_softc *sc)
1139{
1140 int i;
1141
1142 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1143 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1144 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1145 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1146 BUS_DMASYNC_POSTREAD);
1147 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1148 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1149 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1150 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1151 }
1152 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1153 sizeof(struct bge_extrx_bd));
1154 }
1155}
1156
1157static void
1158bge_free_tx_ring(struct bge_softc *sc)
1159{
1160 int i;
1161
1162 if (sc->bge_ldata.bge_tx_ring == NULL)
1163 return;
1164
1165 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1166 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1167 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1168 sc->bge_cdata.bge_tx_dmamap[i],
1169 BUS_DMASYNC_POSTWRITE);
1170 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1171 sc->bge_cdata.bge_tx_dmamap[i]);
1172 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1173 sc->bge_cdata.bge_tx_chain[i] = NULL;
1174 }
1175 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1176 sizeof(struct bge_tx_bd));
1177 }
1178}
1179
1180static int
1181bge_init_tx_ring(struct bge_softc *sc)
1182{
1183 sc->bge_txcnt = 0;
1184 sc->bge_tx_saved_considx = 0;
1185
1186 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1187 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1188 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1189
1190 /* Initialize transmit producer index for host-memory send ring. */
1191 sc->bge_tx_prodidx = 0;
1192 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1193
1194 /* 5700 b2 errata */
1195 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1196 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1197
1198 /* NIC-memory send ring not used; initialize to zero. */
1199 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1200 /* 5700 b2 errata */
1201 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1202 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1203
1204 return (0);
1205}
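/*
 * The duplicated mailbox writes above reportedly work around a 5700
 * B-series erratum in which a single write to a mailbox register can
 * be dropped by the chip.
 */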
1206
1207static void
1208bge_setpromisc(struct bge_softc *sc)
1209{
1210 struct ifnet *ifp;
1211
1212 BGE_LOCK_ASSERT(sc);
1213
1214 ifp = sc->bge_ifp;
1215
1216 /* Enable or disable promiscuous mode as needed. */
1217 if (ifp->if_flags & IFF_PROMISC)
1218 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1219 else
1220 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1221}
1222
1223static void
1224bge_setmulti(struct bge_softc *sc)
1225{
1226 struct ifnet *ifp;
1227 struct ifmultiaddr *ifma;
1228 uint32_t hashes[4] = { 0, 0, 0, 0 };
1229 int h, i;
1230
1231 BGE_LOCK_ASSERT(sc);
1232
1233 ifp = sc->bge_ifp;
1234
1235 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1236 for (i = 0; i < 4; i++)
1237 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1238 return;
1239 }
1240
1241 /* First, zot all the existing filters. */
1242 for (i = 0; i < 4; i++)
1243 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1244
1245 /* Now program new ones. */
1246 if_maddr_rlock(ifp);
1247 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1248 if (ifma->ifma_addr->sa_family != AF_LINK)
1249 continue;
1250 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1251 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1252 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1253 }
1254 if_maddr_runlock(ifp);
1255
1256 for (i = 0; i < 4; i++)
1257 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1258}
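/*
 * The multicast filter above keeps the low 7 bits of the little-endian
 * CRC32 of each address: bits 6-5 select one of the four BGE_MAR
 * registers and bits 4-0 select the bit within it.  For example,
 * h = 0x47 sets bit 7 (0x47 & 0x1F) of register 2 ((0x47 & 0x60) >> 5).
 */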
1259
1260static void
1261bge_setvlan(struct bge_softc *sc)
1262{
1263 struct ifnet *ifp;
1264
1265 BGE_LOCK_ASSERT(sc);
1266
1267 ifp = sc->bge_ifp;
1268
1269 /* Enable or disable VLAN tag stripping as needed. */
1270 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1271 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1272 else
1273 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1274}
1275
1276static void
1277bge_sig_pre_reset(struct bge_softc *sc, int type)
1278{
1279
1280 /*
1281	 * Some chips don't like this, so only do it if ASF is enabled.
1282 */
1283 if (sc->bge_asf_mode)
1284 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1285
1286 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1287 switch (type) {
1288 case BGE_RESET_START:
1289 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1290 break;
1291 case BGE_RESET_STOP:
1292 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1293 break;
1294 }
1295 }
1296}
1297
1298static void
1299bge_sig_post_reset(struct bge_softc *sc, int type)
1300{
1301
1302 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1303 switch (type) {
1304 case BGE_RESET_START:
1305 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1306 /* START DONE */
1307 break;
1308 case BGE_RESET_STOP:
1309 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1310 break;
1311 }
1312 }
1313}
1314
1315static void
1316bge_sig_legacy(struct bge_softc *sc, int type)
1317{
1318
1319 if (sc->bge_asf_mode) {
1320 switch (type) {
1321 case BGE_RESET_START:
1322 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1323 break;
1324 case BGE_RESET_STOP:
1325 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1326 break;
1327 }
1328 }
1329}
1330
1331static void
1332bge_stop_fw(struct bge_softc *sc)
1333{
1334 int i;
1335
1336 if (sc->bge_asf_mode) {
1337 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1338 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1339 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1340
1341 for (i = 0; i < 100; i++ ) {
1342 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1343 break;
1344 DELAY(10);
1345 }
1346 }
1347}
1348
1349/*
1350 * Do endian, PCI and DMA initialization.
1351 */
1352static int
1353bge_chipinit(struct bge_softc *sc)
1354{
1355 uint32_t dma_rw_ctl;
1356 uint16_t val;
1357 int i;
1358
1359 /* Set endianness before we access any non-PCI registers. */
1360 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1361
1362 /* Clear the MAC control register */
1363 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1364
1365 /*
1366 * Clear the MAC statistics block in the NIC's
1367 * internal memory.
1368 */
1369 for (i = BGE_STATS_BLOCK;
1370 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1371 BGE_MEMWIN_WRITE(sc, i, 0);
1372
1373 for (i = BGE_STATUS_BLOCK;
1374 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1375 BGE_MEMWIN_WRITE(sc, i, 0);
1376
1377 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1378 /*
1379 * Fix data corruption caused by non-qword write with WB.
1380 * Fix master abort in PCI mode.
1381 * Fix PCI latency timer.
1382 */
1383 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1384 val |= (1 << 10) | (1 << 12) | (1 << 13);
1385 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1386 }
1387
1388 /*
1389 * Set up the PCI DMA control register.
1390 */
1391 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1392 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1393 if (sc->bge_flags & BGE_FLAG_PCIE) {
1394 /* Read watermark not used, 128 bytes for write. */
1395 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1396 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1397 if (BGE_IS_5714_FAMILY(sc)) {
1398 /* 256 bytes for read and write. */
1399 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1400 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1401 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1402 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1403 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1404 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1405 /*
1406 * In the BCM5703, the DMA read watermark should
1407 * be set to less than or equal to the maximum
1408 * memory read byte count of the PCI-X command
1409 * register.
1410 */
1411 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1412 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1413 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1414 /* 1536 bytes for read, 384 bytes for write. */
1415 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1416 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1417 } else {
1418 /* 384 bytes for read and write. */
1419 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1420 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1421 0x0F;
1422 }
1423 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1424 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1425 uint32_t tmp;
1426
1427 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1428 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1429 if (tmp == 6 || tmp == 7)
1430 dma_rw_ctl |=
1431 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1432
1433 /* Set PCI-X DMA write workaround. */
1434 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1435 }
1436 } else {
1437 /* Conventional PCI bus: 256 bytes for read and write. */
1438 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1439 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1440
1441 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1442 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1443 dma_rw_ctl |= 0x0F;
1444 }
1445 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1446 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1447 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1448 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1449 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1450 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1451 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1452 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1453
1454 /*
1455 * Set up general mode register.
1456 */
1457 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1458 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1459 BGE_MODECTL_TX_NO_PHDR_CSUM);
1460
1461 /*
1462	 * The BCM5701 B5 has a bug causing data corruption when using
1463 * 64-bit DMA reads, which can be terminated early and then
1464 * completed later as 32-bit accesses, in combination with
1465 * certain bridges.
1466 */
1467 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1468 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1469 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1470
1471 /*
1472 * Tell the firmware the driver is running
1473 */
1474 if (sc->bge_asf_mode & ASF_STACKUP)
1475 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1476
1477 /*
1478 * Disable memory write invalidate. Apparently it is not supported
1479 * properly by these devices. Also ensure that INTx isn't disabled,
1480 * as these chips need it even when using MSI.
1481 */
1482 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1483 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1484
1485	/* Set the timer prescaler (always 66MHz). */
1486 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1487
1488 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1489 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1490 DELAY(40); /* XXX */
1491
1492 /* Put PHY into ready state */
1493 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1494 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1495 DELAY(40);
1496 }
1497
1498 return (0);
1499}
1500
1501static int
1502bge_blockinit(struct bge_softc *sc)
1503{
1504 struct bge_rcb *rcb;
1505 bus_size_t vrcb;
1506 bge_hostaddr taddr;
1507 uint32_t val;
1508 int i, limit;
1509
1510 /*
1511 * Initialize the memory window pointer register so that
1512 * we can access the first 32K of internal NIC RAM. This will
1513 * allow us to set up the TX send ring RCBs and the RX return
1514 * ring RCBs, plus other things which live in NIC memory.
1515 */
1516 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1517
1518 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1519
1520 if (!(BGE_IS_5705_PLUS(sc))) {
1521 /* Configure mbuf memory pool */
1522 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1523 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1524 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1525 else
1526 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1527
1528 /* Configure DMA resource pool */
1529 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1530 BGE_DMA_DESCRIPTORS);
1531 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1532 }
1533
1534 /* Configure mbuf pool watermarks */
1535 if (!BGE_IS_5705_PLUS(sc)) {
1536 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1537 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1538 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1539 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1540 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1541 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1542 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1543 } else {
1544 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1545 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1546 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1547 }
1548
1549 /* Configure DMA resource watermarks */
1550 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1551 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1552
1553 /* Enable buffer manager */
1554 if (!(BGE_IS_5705_PLUS(sc))) {
1555 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1556 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1557
1558 /* Poll for buffer manager start indication */
1559 for (i = 0; i < BGE_TIMEOUT; i++) {
1560 DELAY(10);
1561 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1562 break;
1563 }
1564
1565 if (i == BGE_TIMEOUT) {
1566 device_printf(sc->bge_dev,
1567 "buffer manager failed to start\n");
1568 return (ENXIO);
1569 }
1570 }
1571
1572 /* Enable flow-through queues */
1573 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1574 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1575
1576 /* Wait until queue initialization is complete */
1577 for (i = 0; i < BGE_TIMEOUT; i++) {
1578 DELAY(10);
1579 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1580 break;
1581 }
1582
1583 if (i == BGE_TIMEOUT) {
1584 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1585 return (ENXIO);
1586 }
1587
1588 /*
1589 * Summary of rings supported by the controller:
1590 *
1591 * Standard Receive Producer Ring
1592 * - This ring is used to feed receive buffers for "standard"
1593 * sized frames (typically 1536 bytes) to the controller.
1594 *
1595 * Jumbo Receive Producer Ring
1596 * - This ring is used to feed receive buffers for jumbo sized
1597 * frames (i.e. anything bigger than the "standard" frames)
1598 * to the controller.
1599 *
1600 * Mini Receive Producer Ring
1601 * - This ring is used to feed receive buffers for "mini"
1602 * sized frames to the controller.
1603 * - This feature required external memory for the controller
1604 * but was never used in a production system. Should always
1605 * be disabled.
1606 *
1607 * Receive Return Ring
1608 * - After the controller has placed an incoming frame into a
1609	 * receive buffer, that buffer is moved into a receive return
1610	 * ring. The driver is then responsible for passing the
1611 * buffer up to the stack. Many versions of the controller
1612 * support multiple RR rings.
1613 *
1614 * Send Ring
1615 * - This ring is used for outgoing frames. Many versions of
1616 * the controller support multiple send rings.
1617 */
1618
1619 /* Initialize the standard receive producer ring control block. */
1620 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1621 rcb->bge_hostaddr.bge_addr_lo =
1622 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1623 rcb->bge_hostaddr.bge_addr_hi =
1624 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1625 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1626 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1627 if (BGE_IS_5705_PLUS(sc)) {
1628 /*
1629 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1630 * Bits 15-2 : Reserved (should be 0)
1631 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1632 * Bit 0 : Reserved
1633 */
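		/*
		 * BGE_RCB_MAXLEN_FLAGS(maxlen, flags) presumably packs
		 * these fields as ((maxlen) << 16 | (flags)), matching
		 * the bit layout described above.
		 */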
1634 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1635 } else {
1636 /*
1637 * Ring size is always XXX entries
1638 * Bits 31-16: Maximum RX frame size
1639 * Bits 15-2 : Reserved (should be 0)
1640 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1641 * Bit 0 : Reserved
1642 */
1643 rcb->bge_maxlen_flags =
1644 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1645 }
1646 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1647 /* Write the standard receive producer ring control block. */
1648 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1649 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1650 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1651 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1652
1653 /* Reset the standard receive producer ring producer index. */
1654 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1655
1656 /*
1657 * Initialize the jumbo RX producer ring control
1658 * block. We set the 'ring disabled' bit in the
1659 * flags field until we're actually ready to start
1660 * using this ring (i.e. once we set the MTU
1661 * high enough to require it).
1662 */
1663 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1664 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1665 /* Get the jumbo receive producer ring RCB parameters. */
1666 rcb->bge_hostaddr.bge_addr_lo =
1667 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1668 rcb->bge_hostaddr.bge_addr_hi =
1669 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1670 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1671 sc->bge_cdata.bge_rx_jumbo_ring_map,
1672 BUS_DMASYNC_PREREAD);
1673 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1674 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1675 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1676 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1677 rcb->bge_hostaddr.bge_addr_hi);
1678 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1679 rcb->bge_hostaddr.bge_addr_lo);
1680 /* Program the jumbo receive producer ring RCB parameters. */
1681 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1682 rcb->bge_maxlen_flags);
1683 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1684 /* Reset the jumbo receive producer ring producer index. */
1685 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1686 }
1687
1688 /* Disable the mini receive producer ring RCB. */
1689 if (BGE_IS_5700_FAMILY(sc)) {
1690 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1691 rcb->bge_maxlen_flags =
1692 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1693 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1694 rcb->bge_maxlen_flags);
1695 /* Reset the mini receive producer ring producer index. */
1696 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1697 }
1698
1699 /*
1700 * The BD ring replenish thresholds control how often the
1701 * hardware fetches new BD's from the producer rings in host
1702 * memory. Setting the value too low on a busy system can
1703	 * starve the hardware and reduce the throughput.
1704 *
1705	 * Set the BD ring replenish thresholds. The recommended
1706 * values are 1/8th the number of descriptors allocated to
1707 * each ring.
1708 * XXX The 5754 requires a lower threshold, so it might be a
1709 * requirement of all 575x family chips. The Linux driver sets
1710 * the lower threshold for all 5705 family chips as well, but there
1711 * are reports that it might not need to be so strict.
1712 *
1713 * XXX Linux does some extra fiddling here for the 5906 parts as
1714 * well.
1715 */
1716 if (BGE_IS_5705_PLUS(sc))
1717 val = 8;
1718 else
1719 val = BGE_STD_RX_RING_CNT / 8;
1720 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1721 if (BGE_IS_JUMBO_CAPABLE(sc))
1722 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1723 BGE_JUMBO_RX_RING_CNT/8);
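	/*
	 * For example, assuming the usual 512-entry standard ring, the
	 * pre-5705 threshold above works out to BGE_STD_RX_RING_CNT / 8 =
	 * 512 / 8 = 64 BDs per fetch.
	 */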
1724
1725 /*
1726 * Disable all send rings by setting the 'ring disabled' bit
1727 * in the flags field of all the TX send ring control blocks,
1728 * located in NIC memory.
1729 */
1730 if (!BGE_IS_5705_PLUS(sc))
1731 /* 5700 to 5704 had 16 send rings. */
1732 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1733 else
1734 limit = 1;
1735 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1736 for (i = 0; i < limit; i++) {
1737 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1738 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1739 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1740 vrcb += sizeof(struct bge_rcb);
1741 }
1742
1743 /* Configure send ring RCB 0 (we use only the first ring) */
1744 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1745 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1746 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1747 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1748 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1749 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1750 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1751 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1752
1753 /*
1754 * Disable all receive return rings by setting the
1755	 * 'ring disabled' bit in the flags field of all the receive
1756 * return ring control blocks, located in NIC memory.
1757 */
1758 if (!BGE_IS_5705_PLUS(sc))
1759 limit = BGE_RX_RINGS_MAX;
1760 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
1761 limit = 4;
1762 else
1763 limit = 1;
1764 /* Disable all receive return rings. */
1765 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1766 for (i = 0; i < limit; i++) {
1767 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1768 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1769 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1770 BGE_RCB_FLAG_RING_DISABLED);
1771 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1772 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1773 (i * (sizeof(uint64_t))), 0);
1774 vrcb += sizeof(struct bge_rcb);
1775 }
1776
1777 /*
1778 * Set up receive return ring 0. Note that the NIC address
1779 * for RX return rings is 0x0. The return rings live entirely
1780 * within the host, so the nicaddr field in the RCB isn't used.
1781 */
1782 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1783 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1784 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1785 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1786 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1787 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1788 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1789
1790 /* Set random backoff seed for TX */
1791 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1792 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1793 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1794 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1795 BGE_TX_BACKOFF_SEED_MASK);
1796
1797 /* Set inter-packet gap */
1798 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1799
1800 /*
1801 * Specify which ring to use for packets that don't match
1802 * any RX rules.
1803 */
1804 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1805
1806 /*
1807 * Configure number of RX lists. One interrupt distribution
1808 * list, sixteen active lists, one bad frames class.
1809 */
1810 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1811
1812	/* Initialize RX list placement stats mask. */
1813 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1814 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1815
1816 /* Disable host coalescing until we get it set up */
1817 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1818
1819 /* Poll to make sure it's shut down. */
1820 for (i = 0; i < BGE_TIMEOUT; i++) {
1821 DELAY(10);
1822 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1823 break;
1824 }
1825
1826 if (i == BGE_TIMEOUT) {
1827 device_printf(sc->bge_dev,
1828 "host coalescing engine failed to idle\n");
1829 return (ENXIO);
1830 }
1831
1832 /* Set up host coalescing defaults */
1833 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1834 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1835 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1836 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1837 if (!(BGE_IS_5705_PLUS(sc))) {
1838 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1839 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1840 }
1841 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1842 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1843
1844 /* Set up address of statistics block */
1845 if (!(BGE_IS_5705_PLUS(sc))) {
1846 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1847 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1848 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1849 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1850 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1851 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1852 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1853 }
1854
1855 /* Set up address of status block */
1856 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1857 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1858 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1859 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1860
1861 /* Set up status block size. */
1862 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1863 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1864 val = BGE_STATBLKSZ_FULL;
1865 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1866 } else {
1867 val = BGE_STATBLKSZ_32BYTE;
1868 bzero(sc->bge_ldata.bge_status_block, 32);
1869 }
1870 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1871 sc->bge_cdata.bge_status_map,
1872 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1873
1874 /* Turn on host coalescing state machine */
1875 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1876
1877 /* Turn on RX BD completion state machine and enable attentions */
1878 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1879 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1880
1881 /* Turn on RX list placement state machine */
1882 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1883
1884 /* Turn on RX list selector state machine. */
1885 if (!(BGE_IS_5705_PLUS(sc)))
1886 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1887
1888 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1889 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1890 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1891 BGE_MACMODE_FRMHDR_DMA_ENB;
1892
1893 if (sc->bge_flags & BGE_FLAG_TBI)
1894 val |= BGE_PORTMODE_TBI;
1895 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1896 val |= BGE_PORTMODE_GMII;
1897 else
1898 val |= BGE_PORTMODE_MII;
1899
1900 /* Turn on DMA, clear stats */
1901 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1902
1903 /* Set misc. local control, enable interrupts on attentions */
1904 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1905
1906#ifdef notdef
1907 /* Assert GPIO pins for PHY reset */
1908 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1909 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1910 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1911 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1912#endif
1913
1914 /* Turn on DMA completion state machine */
1915 if (!(BGE_IS_5705_PLUS(sc)))
1916 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1917
1918 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1919
1920 /* Enable host coalescing bug fix. */
1921 if (BGE_IS_5755_PLUS(sc))
1922 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1923
1924 /* Request larger DMA burst size to get better performance. */
1925 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
1926 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1927
1928 /* Turn on write DMA state machine */
1929 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1930 DELAY(40);
1931
1932 /* Turn on read DMA state machine */
1933 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1934 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1935 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1936 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1937 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1938 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1939 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1940 if (sc->bge_flags & BGE_FLAG_PCIE)
1941 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1942 if (sc->bge_flags & BGE_FLAG_TSO) {
1943 val |= BGE_RDMAMODE_TSO4_ENABLE;
1944 if (sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1945 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1946 val |= BGE_RDMAMODE_TSO6_ENABLE;
1947 }
1948 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
1949 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1950 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1951 sc->bge_asicrev == BGE_ASICREV_BCM57780) {
1952 /*
1953 * Enable fix for read DMA FIFO overruns.
1954 * The fix is to limit the number of RX BDs
1955		 * the hardware would fetch at a time.
1956 */
1957 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
1958 CSR_READ_4(sc, BGE_RDMA_RSRVCTRL) |
1959 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1960 }
1961 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1962 DELAY(40);
1963
1964 /* Turn on RX data completion state machine */
1965 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1966
1967 /* Turn on RX BD initiator state machine */
1968 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1969
1970 /* Turn on RX data and RX BD initiator state machine */
1971 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1972
1973 /* Turn on Mbuf cluster free state machine */
1974 if (!(BGE_IS_5705_PLUS(sc)))
1975 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1976
1977 /* Turn on send BD completion state machine */
1978 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1979
1980 /* Turn on send data completion state machine */
1981 val = BGE_SDCMODE_ENABLE;
1982 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
1983 val |= BGE_SDCMODE_CDELAY;
1984 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1985
1986 /* Turn on send data initiator state machine */
1987 if (sc->bge_flags & BGE_FLAG_TSO)
1988 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
1989 else
1990 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1991
1992 /* Turn on send BD initiator state machine */
1993 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1994
1995 /* Turn on send BD selector state machine */
1996 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1997
1998 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1999 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2000 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2001
2002 /* ack/clear link change events */
2003 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2004 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2005 BGE_MACSTAT_LINK_CHANGED);
2006 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2007
2008 /*
2009 * Enable attention when the link has changed state for
2010 * devices that use auto polling.
2011 */
2012 if (sc->bge_flags & BGE_FLAG_TBI) {
2013 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2014 } else {
2015 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2016 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2017 DELAY(80);
2018 }
2019 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2020 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2021 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2022 BGE_EVTENB_MI_INTERRUPT);
2023 }
2024
2025 /*
2026 * Clear any pending link state attention.
2027 * Otherwise some link state change events may be lost until attention
2028	 * is cleared by the bge_intr() -> bge_link_upd() sequence.
2029 * It's not necessary on newer BCM chips - perhaps enabling link
2030 * state change attentions implies clearing pending attention.
2031 */
2032 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2033 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2034 BGE_MACSTAT_LINK_CHANGED);
2035
2036 /* Enable link state change attentions. */
2037 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2038
2039 return (0);
2040}
2041
2042const struct bge_revision *
2043bge_lookup_rev(uint32_t chipid)
2044{
2045 const struct bge_revision *br;
2046
2047 for (br = bge_revisions; br->br_name != NULL; br++) {
2048 if (br->br_chipid == chipid)
2049 return (br);
2050 }
2051
2052 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2053 if (br->br_chipid == BGE_ASICREV(chipid))
2054 return (br);
2055 }
2056
2057 return (NULL);
2058}
2059
2060const struct bge_vendor *
2061bge_lookup_vendor(uint16_t vid)
2062{
2063 const struct bge_vendor *v;
2064
2065 for (v = bge_vendors; v->v_name != NULL; v++)
2066 if (v->v_id == vid)
2067 return (v);
2068
2069 panic("%s: unknown vendor %d", __func__, vid);
2070 return (NULL);
2071}
2072
2073/*
2074 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2075 * against our list and return its name if we find a match.
2076 *
2077 * Note that since the Broadcom controller contains VPD support, we
2078 * try to get the device name string from the controller itself instead
2079 * of the compiled-in string. It guarantees we'll always announce the
2080 * right product name. We fall back to the compiled-in string when
2081 * VPD is unavailable or corrupt.
2082 */
2083static int
2084bge_probe(device_t dev)
2085{
2086 const struct bge_type *t = bge_devs;
2087 struct bge_softc *sc = device_get_softc(dev);
2088 uint16_t vid, did;
2089
2090 sc->bge_dev = dev;
2091 vid = pci_get_vendor(dev);
2092 did = pci_get_device(dev);
2093	while (t->bge_vid != 0) {
2094 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2095 char model[64], buf[96];
2096 const struct bge_revision *br;
2097 const struct bge_vendor *v;
2098 uint32_t id;
2099
2100 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2101 BGE_PCIMISCCTL_ASICREV_SHIFT;
2102 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG)
2103 id = pci_read_config(dev,
2104 BGE_PCI_PRODID_ASICREV, 4);
2105 br = bge_lookup_rev(id);
2106 v = bge_lookup_vendor(vid);
2107 {
2108#if __FreeBSD_version > 700024
2109 const char *pname;
2110
2111 if (bge_has_eaddr(sc) &&
2112 pci_get_vpd_ident(dev, &pname) == 0)
2113 snprintf(model, 64, "%s", pname);
2114 else
2115#endif
2116 snprintf(model, 64, "%s %s",
2117 v->v_name,
2118 br != NULL ? br->br_name :
2119 "NetXtreme Ethernet Controller");
2120 }
2121 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2122 br != NULL ? "" : "unknown ", id);
2123 device_set_desc_copy(dev, buf);
2124 return (0);
2125 }
2126 t++;
2127 }
2128
2129 return (ENXIO);
2130}
2131
2132static void
2133bge_dma_free(struct bge_softc *sc)
2134{
2135 int i;
2136
2137 /* Destroy DMA maps for RX buffers. */
2138 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2139 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2140 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2141 sc->bge_cdata.bge_rx_std_dmamap[i]);
2142 }
2143 if (sc->bge_cdata.bge_rx_std_sparemap)
2144 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2145 sc->bge_cdata.bge_rx_std_sparemap);
2146
2147 /* Destroy DMA maps for jumbo RX buffers. */
2148 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2149 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2150 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2151 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2152 }
2153 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2154 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2155 sc->bge_cdata.bge_rx_jumbo_sparemap);
2156
2157 /* Destroy DMA maps for TX buffers. */
2158 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2159 if (sc->bge_cdata.bge_tx_dmamap[i])
2160 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2161 sc->bge_cdata.bge_tx_dmamap[i]);
2162 }
2163
2164 if (sc->bge_cdata.bge_rx_mtag)
2165 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2166 if (sc->bge_cdata.bge_tx_mtag)
2167 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2168
2169
2170 /* Destroy standard RX ring. */
2171 if (sc->bge_cdata.bge_rx_std_ring_map)
2172 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2173 sc->bge_cdata.bge_rx_std_ring_map);
2174 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2175 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2176 sc->bge_ldata.bge_rx_std_ring,
2177 sc->bge_cdata.bge_rx_std_ring_map);
2178
2179 if (sc->bge_cdata.bge_rx_std_ring_tag)
2180 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2181
2182 /* Destroy jumbo RX ring. */
2183 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2184 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2185 sc->bge_cdata.bge_rx_jumbo_ring_map);
2186
2187 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2188 sc->bge_ldata.bge_rx_jumbo_ring)
2189 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2190 sc->bge_ldata.bge_rx_jumbo_ring,
2191 sc->bge_cdata.bge_rx_jumbo_ring_map);
2192
2193 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2194 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2195
2196 /* Destroy RX return ring. */
2197 if (sc->bge_cdata.bge_rx_return_ring_map)
2198 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2199 sc->bge_cdata.bge_rx_return_ring_map);
2200
2201 if (sc->bge_cdata.bge_rx_return_ring_map &&
2202 sc->bge_ldata.bge_rx_return_ring)
2203 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2204 sc->bge_ldata.bge_rx_return_ring,
2205 sc->bge_cdata.bge_rx_return_ring_map);
2206
2207 if (sc->bge_cdata.bge_rx_return_ring_tag)
2208 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2209
2210 /* Destroy TX ring. */
2211 if (sc->bge_cdata.bge_tx_ring_map)
2212 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2213 sc->bge_cdata.bge_tx_ring_map);
2214
2215 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2216 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2217 sc->bge_ldata.bge_tx_ring,
2218 sc->bge_cdata.bge_tx_ring_map);
2219
2220 if (sc->bge_cdata.bge_tx_ring_tag)
2221 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2222
2223 /* Destroy status block. */
2224 if (sc->bge_cdata.bge_status_map)
2225 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2226 sc->bge_cdata.bge_status_map);
2227
2228 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2229 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2230 sc->bge_ldata.bge_status_block,
2231 sc->bge_cdata.bge_status_map);
2232
2233 if (sc->bge_cdata.bge_status_tag)
2234 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2235
2236 /* Destroy statistics block. */
2237 if (sc->bge_cdata.bge_stats_map)
2238 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2239 sc->bge_cdata.bge_stats_map);
2240
2241 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2242 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2243 sc->bge_ldata.bge_stats,
2244 sc->bge_cdata.bge_stats_map);
2245
2246 if (sc->bge_cdata.bge_stats_tag)
2247 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2248
2249 if (sc->bge_cdata.bge_buffer_tag)
2250 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2251
2252 /* Destroy the parent tag. */
2253 if (sc->bge_cdata.bge_parent_tag)
2254 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2255}
2256
2257static int
2258bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2259 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2260 bus_addr_t *paddr, const char *msg)
2261{
2262 struct bge_dmamap_arg ctx;
2263 bus_addr_t lowaddr;
2264 bus_size_t ring_end;
2265 int error;
2266
2267 lowaddr = BUS_SPACE_MAXADDR;
2268again:
2269 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2270 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2271 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2272 if (error != 0) {
2273 device_printf(sc->bge_dev,
2274 "could not create %s dma tag\n", msg);
2275 return (ENOMEM);
2276 }
2277 /* Allocate DMA'able memory for ring. */
2278 error = bus_dmamem_alloc(*tag, (void **)ring,
2279 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2280 if (error != 0) {
2281 device_printf(sc->bge_dev,
2282 "could not allocate DMA'able memory for %s\n", msg);
2283 return (ENOMEM);
2284 }
2285 /* Load the address of the ring. */
2286 ctx.bge_busaddr = 0;
2287 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2288 &ctx, BUS_DMA_NOWAIT);
2289 if (error != 0) {
2290 device_printf(sc->bge_dev,
2291 "could not load DMA'able memory for %s\n", msg);
2292 return (ENOMEM);
2293 }
2294 *paddr = ctx.bge_busaddr;
2295 ring_end = *paddr + maxsize;
2296 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2297 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2298 /*
2299 * 4GB boundary crossed. Limit maximum allowable DMA
2300 * address space to 32bit and try again.
2301 */
2302 bus_dmamap_unload(*tag, *map);
2303 bus_dmamem_free(*tag, *ring, *map);
2304 bus_dma_tag_destroy(*tag);
2305 if (bootverbose)
2306 device_printf(sc->bge_dev, "4GB boundary crossed, "
2307 "limit DMA address space to 32bit for %s\n", msg);
2308 *ring = NULL;
2309 *tag = NULL;
2310 *map = NULL;
2311 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2312 goto again;
2313 }
2314 return (0);
2315}
2316
2317static int
2318bge_dma_alloc(struct bge_softc *sc)
2319{
2320 bus_addr_t lowaddr;
2321 bus_size_t boundary, sbsz, txsegsz, txmaxsegsz;
2322 int i, error;
2323
2324 lowaddr = BUS_SPACE_MAXADDR;
2325 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2326 lowaddr = BGE_DMA_MAXADDR;
2327 /*
2328 * Allocate the parent bus DMA tag appropriate for PCI.
2329 */
2330 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2331 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2332 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2333 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2334 if (error != 0) {
2335 device_printf(sc->bge_dev,
2336 "could not allocate parent dma tag\n");
2337 return (ENOMEM);
2338 }
2339
2340 /* Create tag for standard RX ring. */
2341 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2342 &sc->bge_cdata.bge_rx_std_ring_tag,
2343 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2344 &sc->bge_cdata.bge_rx_std_ring_map,
2345 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2346 if (error)
2347 return (error);
2348
2349 /* Create tag for RX return ring. */
2350 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2351 &sc->bge_cdata.bge_rx_return_ring_tag,
2352 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2353 &sc->bge_cdata.bge_rx_return_ring_map,
2354 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2355 if (error)
2356 return (error);
2357
2358 /* Create tag for TX ring. */
2359 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2360 &sc->bge_cdata.bge_tx_ring_tag,
2361 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2362 &sc->bge_cdata.bge_tx_ring_map,
2363 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2364 if (error)
2365 return (error);
2366
2367 /*
2368 * Create tag for status block.
2369	 * Because we use only a single TX/RX/RX return ring, use the
2370	 * minimum status block size, except for BCM5700 AX/BX, which
2371	 * seems to want to see the full status block size regardless
2372	 * of the configured number of rings.
2373 */
2374 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2375 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2376 sbsz = BGE_STATUS_BLK_SZ;
2377 else
2378 sbsz = 32;
2379 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2380 &sc->bge_cdata.bge_status_tag,
2381 (uint8_t **)&sc->bge_ldata.bge_status_block,
2382 &sc->bge_cdata.bge_status_map,
2383 &sc->bge_ldata.bge_status_block_paddr, "status block");
2384 if (error)
2385 return (error);
2386
2387 /* Create tag for statistics block. */
2388 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2389 &sc->bge_cdata.bge_stats_tag,
2390 (uint8_t **)&sc->bge_ldata.bge_stats,
2391 &sc->bge_cdata.bge_stats_map,
2392 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2393 if (error)
2394 return (error);
2395
2396 /* Create tag for jumbo RX ring. */
2397 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2398 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2399 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2400 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2401 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2402 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2403 if (error)
2404 return (error);
2405 }
2406
2407 /* Create parent tag for buffers. */
2408 boundary = 0;
2409 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0)
2410 boundary = BGE_DMA_BNDRY;
2411 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2412 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2413 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2414 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2415 if (error != 0) {
2416 device_printf(sc->bge_dev,
2417 "could not allocate buffer dma tag\n");
2418 return (ENOMEM);
2419 }
2420 /* Create tag for Tx mbufs. */
2421 if (sc->bge_flags & BGE_FLAG_TSO) {
2422 txsegsz = BGE_TSOSEG_SZ;
2423 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2424 } else {
2425 txsegsz = MCLBYTES;
2426 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2427 }
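	/*
	 * With TSO the controller may be handed a frame of up to 65535
	 * bytes plus an Ethernet/VLAN header, hence the larger
	 * txmaxsegsz above; without TSO a frame is limited to
	 * BGE_NSEG_NEW clusters of MCLBYTES each.
	 */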
2428 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2429 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2430 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2431 &sc->bge_cdata.bge_tx_mtag);
2432
2433 if (error) {
2434 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2435 return (ENOMEM);
2436 }
2437
2438 /* Create tag for Rx mbufs. */
2439 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2440 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
2441 MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2442
2443 if (error) {
2444 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2445 return (ENOMEM);
2446 }
2447
2448 /* Create DMA maps for RX buffers. */
2449 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2450 &sc->bge_cdata.bge_rx_std_sparemap);
2451 if (error) {
2452 device_printf(sc->bge_dev,
2453 "can't create spare DMA map for RX\n");
2454 return (ENOMEM);
2455 }
2456 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2457 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2458 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2459 if (error) {
2460 device_printf(sc->bge_dev,
2461 "can't create DMA map for RX\n");
2462 return (ENOMEM);
2463 }
2464 }
2465
2466 /* Create DMA maps for TX buffers. */
2467 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2468 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2469 &sc->bge_cdata.bge_tx_dmamap[i]);
2470 if (error) {
2471 device_printf(sc->bge_dev,
2472 "can't create DMA map for TX\n");
2473 return (ENOMEM);
2474 }
2475 }
2476
2477 /* Create tags for jumbo RX buffers. */
2478 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2479 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2480 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2481 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2482 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2483 if (error) {
2484 device_printf(sc->bge_dev,
2485 "could not allocate jumbo dma tag\n");
2486 return (ENOMEM);
2487 }
2488 /* Create DMA maps for jumbo RX buffers. */
2489 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2490 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2491 if (error) {
2492 device_printf(sc->bge_dev,
2493 "can't create spare DMA map for jumbo RX\n");
2494 return (ENOMEM);
2495 }
2496 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2497 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2498 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2499 if (error) {
2500 device_printf(sc->bge_dev,
2501 "can't create DMA map for jumbo RX\n");
2502 return (ENOMEM);
2503 }
2504 }
2505 }
2506
2507 return (0);
2508}
2509
2510/*
2511 * Return true if this device has more than one port.
2512 */
2513static int
2514bge_has_multiple_ports(struct bge_softc *sc)
2515{
2516 device_t dev = sc->bge_dev;
2517 u_int b, d, f, fscan, s;
2518
2519 d = pci_get_domain(dev);
2520 b = pci_get_bus(dev);
2521 s = pci_get_slot(dev);
2522 f = pci_get_function(dev);
2523 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2524 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2525 return (1);
2526 return (0);
2527}
2528
2529/*
2530 * Return true if MSI can be used with this device.
2531 */
2532static int
2533bge_can_use_msi(struct bge_softc *sc)
2534{
2535 int can_use_msi = 0;
2536
2537 switch (sc->bge_asicrev) {
2538 case BGE_ASICREV_BCM5714_A0:
2539 case BGE_ASICREV_BCM5714:
2540 /*
2541 * Apparently, MSI doesn't work when these chips are
2542 * configured in single-port mode.
2543 */
2544 if (bge_has_multiple_ports(sc))
2545 can_use_msi = 1;
2546 break;
2547 case BGE_ASICREV_BCM5750:
2548 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2549 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2550 can_use_msi = 1;
2551 break;
2552 default:
2553 if (BGE_IS_575X_PLUS(sc))
2554 can_use_msi = 1;
2555 }
2556 return (can_use_msi);
2557}
2558
2559static int
2560bge_attach(device_t dev)
2561{
2562 struct ifnet *ifp;
2563 struct bge_softc *sc;
2564 uint32_t hwcfg = 0, misccfg;
2565 u_char eaddr[ETHER_ADDR_LEN];
2566 int error, msicount, reg, rid, trys;
2567
2568 sc = device_get_softc(dev);
2569 sc->bge_dev = dev;
2570
2571 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2572
2573 /*
2574 * Map control/status registers.
2575 */
2576 pci_enable_busmaster(dev);
2577
2578 rid = PCIR_BAR(0);
2579 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2580 RF_ACTIVE);
2581
2582 if (sc->bge_res == NULL) {
2583 device_printf (sc->bge_dev, "couldn't map memory\n");
2584 error = ENXIO;
2585 goto fail;
2586 }
2587
2588 /* Save various chip information. */
2589 sc->bge_chipid =
2590 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2591 BGE_PCIMISCCTL_ASICREV_SHIFT;
2592 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG)
2593 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV,
2594 4);
2595 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2596 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2597
2598 /* Set default PHY address. */
2599 sc->bge_phy_addr = 1;
2600
2601 /*
2602 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2603 * 5705 A0 and A1 chips.
2604 */
2605 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2606 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2607 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2608 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
2609 sc->bge_phy_flags |= BGE_PHY_WIRESPEED;
2610
2611 if (bge_has_eaddr(sc))
2612 sc->bge_flags |= BGE_FLAG_EADDR;
2613
2614 /* Save chipset family. */
2615 switch (sc->bge_asicrev) {
2616 case BGE_ASICREV_BCM5755:
2617 case BGE_ASICREV_BCM5761:
2618 case BGE_ASICREV_BCM5784:
2619 case BGE_ASICREV_BCM5785:
2620 case BGE_ASICREV_BCM5787:
2621 case BGE_ASICREV_BCM57780:
2622 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2623 BGE_FLAG_5705_PLUS;
2624 break;
2625 case BGE_ASICREV_BCM5700:
2626 case BGE_ASICREV_BCM5701:
2627 case BGE_ASICREV_BCM5703:
2628 case BGE_ASICREV_BCM5704:
2629 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2630 break;
2631 case BGE_ASICREV_BCM5714_A0:
2632 case BGE_ASICREV_BCM5780:
2633 case BGE_ASICREV_BCM5714:
2634 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2635 /* FALLTHROUGH */
2636 case BGE_ASICREV_BCM5750:
2637 case BGE_ASICREV_BCM5752:
2638 case BGE_ASICREV_BCM5906:
2639 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2640 /* FALLTHROUGH */
2641 case BGE_ASICREV_BCM5705:
2642 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2643 break;
2644 }
2645
2646 /* Set various PHY bug flags. */
2647 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2648 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2649 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2650 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2651 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2652 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2653 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2654 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2655 if (pci_get_subvendor(dev) == DELL_VENDORID)
2656 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2657 if ((BGE_IS_5705_PLUS(sc)) &&
2658 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2659 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2660 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2661 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2662 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2663 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2664 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2665 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2666 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2667 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2668 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2669 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2670 } else
2671 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2672 }
2673
2674	/* Identify the chips that use a CPMU. */
2675 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2676 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2677 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2678 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2679 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2680 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2681 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2682 else
2683 sc->bge_mi_mode = BGE_MIMODE_BASE;
2684 /* Enable auto polling for BCM570[0-5]. */
2685 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2686 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2687
2688 /*
2689	 * All controllers that are not 5755 or higher have a 4GB
2690	 * boundary DMA bug.
2691	 * Whenever a DMA address crosses a multiple of the 4GB boundary
2692	 * (4GB, 8GB, 12GB, etc.) and makes the transition from
2693	 * 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA state
2694	 * machine will lock up and cause the device to hang.
2695 */
2696 if (BGE_IS_5755_PLUS(sc) == 0)
2697 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2698
2699 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2700 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2701 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2702 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2703 sc->bge_flags |= BGE_FLAG_5788;
2704 }
2705
2706 /*
 2707	 * Some controllers seem to require special firmware to use
 2708	 * TSO. That firmware is not available to FreeBSD, and Linux
 2709	 * claims that the TSO performed by the firmware is slower than
 2710	 * hardware based TSO. Moreover, the firmware based TSO has a
 2711	 * known bug: it cannot handle TSO if the ethernet + IP/TCP
 2712	 * header is greater than 80 bytes. A workaround for this bug
 2713	 * exists, but it seems more expensive than not using TSO at
 2714	 * all. Some hardware also has the TSO bug, so limit TSO to
 2715	 * the controllers that are not affected by TSO issues
 2716	 * (e.g. 5755 or higher).
2717 */
2718 if (BGE_IS_5755_PLUS(sc)) {
2719 /*
 2720	 * BCM5754 and BCM5787 share the same ASIC id, so an
 2721	 * explicit device id check is required.
 2722	 * For an unknown reason, TSO does not work on BCM5755M.
2723 */
2724 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2725 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2726 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
2727 sc->bge_flags |= BGE_FLAG_TSO;
2728 }
2729
2730 /*
2731 * Check if this is a PCI-X or PCI Express device.
2732 */
2733 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2734 /*
 2735	 * Found a PCI Express capabilities register; this
2736 * must be a PCI Express device.
2737 */
2738 sc->bge_flags |= BGE_FLAG_PCIE;
2739 sc->bge_expcap = reg;
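		/*
		 * Bump the PCIe maximum read request size to 4KB;
		 * presumably this improves DMA read throughput on
		 * these parts.
		 */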
2740 if (pci_get_max_read_req(dev) != 4096)
2741 pci_set_max_read_req(dev, 4096);
2742 } else {
2743 /*
2744 * Check if the device is in PCI-X Mode.
2745 * (This bit is not valid on PCI Express controllers.)
2746 */
2747 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
2748 sc->bge_pcixcap = reg;
2749 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2750 BGE_PCISTATE_PCI_BUSMODE) == 0)
2751 sc->bge_flags |= BGE_FLAG_PCIX;
2752 }
2753
2754 /*
2755 * The 40bit DMA bug applies to the 5714/5715 controllers and is
2756 * not actually a MAC controller bug but an issue with the embedded
 2757	 * PCIe to PCI-X bridge in the device. Use the 40bit DMA workaround.
2758 */
2759 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2760 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2761 /*
2762 * Allocate the interrupt, using MSI if possible. These devices
2763 * support 8 MSI messages, but only the first one is used in
2764 * normal operation.
2765 */
2766 rid = 0;
2767 if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
2768 sc->bge_msicap = reg;
2769 if (bge_can_use_msi(sc)) {
2770 msicount = pci_msi_count(dev);
2771 if (msicount > 1)
2772 msicount = 1;
2773 } else
2774 msicount = 0;
2775 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2776 rid = 1;
2777 sc->bge_flags |= BGE_FLAG_MSI;
2778 }
2779 }
2780
2781 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2782 RF_SHAREABLE | RF_ACTIVE);
2783
2784 if (sc->bge_irq == NULL) {
2785 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2786 error = ENXIO;
2787 goto fail;
2788 }
2789
2790 device_printf(dev,
2791 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2792 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2793 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2794 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2795
2796 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2797
2798 /* Try to reset the chip. */
2799 if (bge_reset(sc)) {
2800 device_printf(sc->bge_dev, "chip reset failed\n");
2801 error = ENXIO;
2802 goto fail;
2803 }
2804
2805 sc->bge_asf_mode = 0;
2806 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2807 == BGE_MAGIC_NUMBER)) {
2808 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2809 & BGE_HWCFG_ASF) {
2810 sc->bge_asf_mode |= ASF_ENABLE;
2811 sc->bge_asf_mode |= ASF_STACKUP;
2812 if (BGE_IS_575X_PLUS(sc))
2813 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2814 }
2815 }
2816
2817 /* Try to reset the chip again the nice way. */
2818 bge_stop_fw(sc);
2819 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2820 if (bge_reset(sc)) {
2821 device_printf(sc->bge_dev, "chip reset failed\n");
2822 error = ENXIO;
2823 goto fail;
2824 }
2825
2826 bge_sig_legacy(sc, BGE_RESET_STOP);
2827 bge_sig_post_reset(sc, BGE_RESET_STOP);
2828
2829 if (bge_chipinit(sc)) {
2830 device_printf(sc->bge_dev, "chip initialization failed\n");
2831 error = ENXIO;
2832 goto fail;
2833 }
2834
2835 error = bge_get_eaddr(sc, eaddr);
2836 if (error) {
2837 device_printf(sc->bge_dev,
2838 "failed to read station address\n");
2839 error = ENXIO;
2840 goto fail;
2841 }
2842
2843 /* 5705 limits RX return ring to 512 entries. */
2844 if (BGE_IS_5705_PLUS(sc))
2845 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2846 else
2847 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2848
2849 if (bge_dma_alloc(sc)) {
2850 device_printf(sc->bge_dev,
2851 "failed to allocate DMA resources\n");
2852 error = ENXIO;
2853 goto fail;
2854 }
2855
2856 bge_add_sysctls(sc);
2857
2858 /* Set default tuneable values. */
2859 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2860 sc->bge_rx_coal_ticks = 150;
2861 sc->bge_tx_coal_ticks = 150;
2862 sc->bge_rx_max_coal_bds = 10;
2863 sc->bge_tx_max_coal_bds = 10;
2864
2865 /* Initialize checksum features to use. */
2866 sc->bge_csum_features = BGE_CSUM_FEATURES;
2867 if (sc->bge_forced_udpcsum != 0)
2868 sc->bge_csum_features |= CSUM_UDP;
2869
2870 /* Set up ifnet structure */
2871 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2872 if (ifp == NULL) {
2873 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2874 error = ENXIO;
2875 goto fail;
2876 }
2877 ifp->if_softc = sc;
2878 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2879 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2880 ifp->if_ioctl = bge_ioctl;
2881 ifp->if_start = bge_start;
2882 ifp->if_init = bge_init;
2883 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2884 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2885 IFQ_SET_READY(&ifp->if_snd);
2886 ifp->if_hwassist = sc->bge_csum_features;
2887 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2888 IFCAP_VLAN_MTU;
2889 if ((sc->bge_flags & BGE_FLAG_TSO) != 0) {
2890 ifp->if_hwassist |= CSUM_TSO;
2891 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
2892 }
2893#ifdef IFCAP_VLAN_HWCSUM
2894 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2895#endif
2896 ifp->if_capenable = ifp->if_capabilities;
2897#ifdef DEVICE_POLLING
2898 ifp->if_capabilities |= IFCAP_POLLING;
2899#endif
2900
2901 /*
2902 * 5700 B0 chips do not support checksumming correctly due
2903 * to hardware bugs.
2904 */
2905 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2906 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2907 ifp->if_capenable &= ~IFCAP_HWCSUM;
2908 ifp->if_hwassist = 0;
2909 }
2910
2911 /*
2912 * Figure out what sort of media we have by checking the
2913 * hardware config word in the first 32k of NIC internal memory,
2914 * or fall back to examining the EEPROM if necessary.
2915 * Note: on some BCM5700 cards, this value appears to be unset.
2916 * If that's the case, we have to rely on identifying the NIC
2917 * by its PCI subsystem ID, as we do below for the SysKonnect
2918 * SK-9D41.
2919 */
2920 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2921 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2922 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
2923 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2924 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2925 sizeof(hwcfg))) {
2926 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2927 error = ENXIO;
2928 goto fail;
2929 }
2930 hwcfg = ntohl(hwcfg);
2931 }
2932
2933 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2934 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
2935 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2936 if (BGE_IS_5714_FAMILY(sc))
2937 sc->bge_flags |= BGE_FLAG_MII_SERDES;
2938 else
2939 sc->bge_flags |= BGE_FLAG_TBI;
2940 }
2941
2942 if (sc->bge_flags & BGE_FLAG_TBI) {
2943 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2944 bge_ifmedia_sts);
2945 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
2946 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2947 0, NULL);
2948 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
2949 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2950 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2951 } else {
2952 /*
2953 * Do transceiver setup and tell the firmware the
 2954		 * driver is down so we can try to get access for the
2955 * probe if ASF is running. Retry a couple of times
2956 * if we get a conflict with the ASF firmware accessing
2957 * the PHY.
2958 */
2959 trys = 0;
2960 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2961again:
2962 bge_asf_driver_up(sc);
2963
2964 if (mii_phy_probe(dev, &sc->bge_miibus,
2965 bge_ifmedia_upd, bge_ifmedia_sts)) {
2966 if (trys++ < 4) {
2967 device_printf(sc->bge_dev, "Try again\n");
2968 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
2969 BMCR_RESET);
2970 goto again;
2971 }
2972
2973 device_printf(sc->bge_dev, "MII without any PHY!\n");
2974 error = ENXIO;
2975 goto fail;
2976 }
2977
2978 /*
2979 * Now tell the firmware we are going up after probing the PHY
2980 */
2981 if (sc->bge_asf_mode & ASF_STACKUP)
2982 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2983 }
2984
2985 /*
2986 * When using the BCM5701 in PCI-X mode, data corruption has
2987 * been observed in the first few bytes of some received packets.
2988 * Aligning the packet buffer in memory eliminates the corruption.
2989 * Unfortunately, this misaligns the packet payloads. On platforms
2990 * which do not support unaligned accesses, we will realign the
2991 * payloads by copying the received packets.
2992 */
2993 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2994 sc->bge_flags & BGE_FLAG_PCIX)
2995 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2996
2997 /*
2998 * Call MI attach routine.
2999 */
3000 ether_ifattach(ifp, eaddr);
3001 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3002
3003 /* Tell upper layer we support long frames. */
3004 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3005
3006 /*
3007 * Hookup IRQ last.
3008 */
3009#if __FreeBSD_version > 700030
3010 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3011 /* Take advantage of single-shot MSI. */
3012 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3013 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3014 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3015 taskqueue_thread_enqueue, &sc->bge_tq);
3016 if (sc->bge_tq == NULL) {
3017 device_printf(dev, "could not create taskqueue.\n");
3018 ether_ifdetach(ifp);
3019 error = ENXIO;
3020 goto fail;
3021 }
3022 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3023 device_get_nameunit(sc->bge_dev));
3024 error = bus_setup_intr(dev, sc->bge_irq,
3025 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3026 &sc->bge_intrhand);
3027 if (error)
3028 ether_ifdetach(ifp);
3029 } else
3030 error = bus_setup_intr(dev, sc->bge_irq,
3031 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3032 &sc->bge_intrhand);
3033#else
3034 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
3035 bge_intr, sc, &sc->bge_intrhand);
3036#endif
3037
3038 if (error) {
3039 bge_detach(dev);
3040 device_printf(sc->bge_dev, "couldn't set up irq\n");
3041 }
3042
3043 return (0);
3044
3045fail:
3046 bge_release_resources(sc);
3047
3048 return (error);
3049}
3050
3051static int
3052bge_detach(device_t dev)
3053{
3054 struct bge_softc *sc;
3055 struct ifnet *ifp;
3056
3057 sc = device_get_softc(dev);
3058 ifp = sc->bge_ifp;
3059
3060#ifdef DEVICE_POLLING
3061 if (ifp->if_capenable & IFCAP_POLLING)
3062 ether_poll_deregister(ifp);
3063#endif
3064
3065 BGE_LOCK(sc);
3066 bge_stop(sc);
3067 bge_reset(sc);
3068 BGE_UNLOCK(sc);
3069
3070 callout_drain(&sc->bge_stat_ch);
3071
3072 if (sc->bge_tq)
3073 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3074 ether_ifdetach(ifp);
3075
3076 if (sc->bge_flags & BGE_FLAG_TBI) {
3077 ifmedia_removeall(&sc->bge_ifmedia);
3078 } else {
3079 bus_generic_detach(dev);
3080 device_delete_child(dev, sc->bge_miibus);
3081 }
3082
3083 bge_release_resources(sc);
3084
3085 return (0);
3086}
3087
3088static void
3089bge_release_resources(struct bge_softc *sc)
3090{
3091 device_t dev;
3092
3093 dev = sc->bge_dev;
3094
3095 if (sc->bge_tq != NULL)
3096 taskqueue_free(sc->bge_tq);
3097
3098 if (sc->bge_intrhand != NULL)
3099 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3100
3101 if (sc->bge_irq != NULL)
3102 bus_release_resource(dev, SYS_RES_IRQ,
3103 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3104
3105 if (sc->bge_flags & BGE_FLAG_MSI)
3106 pci_release_msi(dev);
3107
3108 if (sc->bge_res != NULL)
3109 bus_release_resource(dev, SYS_RES_MEMORY,
3110 PCIR_BAR(0), sc->bge_res);
3111
3112 if (sc->bge_ifp != NULL)
3113 if_free(sc->bge_ifp);
3114
3115 bge_dma_free(sc);
3116
3117 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3118 BGE_LOCK_DESTROY(sc);
3119}
3120
3121static int
3122bge_reset(struct bge_softc *sc)
3123{
3124 device_t dev;
3125 uint32_t cachesize, command, pcistate, reset, val;
3126 void (*write_op)(struct bge_softc *, int, int);
3127 uint16_t devctl;
3128 int i;
3129
3130 dev = sc->bge_dev;
3131
3132 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3133 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3134 if (sc->bge_flags & BGE_FLAG_PCIE)
3135 write_op = bge_writemem_direct;
3136 else
3137 write_op = bge_writemem_ind;
3138 } else
3139 write_op = bge_writereg_ind;
3140
3141 /* Save some important PCI state. */
3142 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3143 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3144 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3145
3146 pci_write_config(dev, BGE_PCI_MISC_CTL,
3147 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3148 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3149
3150 /* Disable fastboot on controllers that support it. */
3151 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3152 BGE_IS_5755_PLUS(sc)) {
3153 if (bootverbose)
3154 device_printf(dev, "Disabling fastboot\n");
3155 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3156 }
3157
3158 /*
3159 * Write the magic number to SRAM at offset 0xB50.
3160 * When firmware finishes its initialization it will
3161 * write ~BGE_MAGIC_NUMBER to the same location.
3162 */
3163 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3164
3165 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3166
3167 /* XXX: Broadcom Linux driver. */
3168 if (sc->bge_flags & BGE_FLAG_PCIE) {
3169 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3170 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3171 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3172 /* Prevent PCIE link training during global reset */
3173 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3174 reset |= 1 << 29;
3175 }
3176 }
3177
3178 /*
3179 * Set GPHY Power Down Override to leave GPHY
3180 * powered up in D0 uninitialized.
3181 */
3182 if (BGE_IS_5705_PLUS(sc))
3183 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3184
3185 /* Issue global reset */
3186 write_op(sc, BGE_MISC_CFG, reset);
3187
3188 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3189 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3190 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3191 val | BGE_VCPU_STATUS_DRV_RESET);
3192 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3193 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3194 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3195 }
3196
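	/*
	 * Give the global reset time to take effect before touching
	 * the device again (a 1ms settle time appears to suffice).
	 */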
3197 DELAY(1000);
3198
3199 /* XXX: Broadcom Linux driver. */
3200 if (sc->bge_flags & BGE_FLAG_PCIE) {
3201 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3202 DELAY(500000); /* wait for link training to complete */
3203 val = pci_read_config(dev, 0xC4, 4);
3204 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3205 }
3206 devctl = pci_read_config(dev,
3207 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
 3208		/* Clear the 'enable no snoop' bit and disable relaxed ordering. */
3209 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3210 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3211 /* Set PCIE max payload size to 128. */
3212 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3213 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3214 devctl, 2);
3215 /* Clear error status. */
3216 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3217 PCIM_EXP_STA_CORRECTABLE_ERROR |
3218 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3219 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3220 }
3221
3222 /* Reset some of the PCI state that got zapped by reset. */
3223 pci_write_config(dev, BGE_PCI_MISC_CTL,
3224 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3225 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3226 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3227 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3228 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3229 /*
 3230	 * Disable PCI-X relaxed ordering to ensure the status block
 3231	 * update comes before the packet buffer DMA. Otherwise the
 3232	 * driver may read a stale status block.
3233 */
3234 if (sc->bge_flags & BGE_FLAG_PCIX) {
3235 devctl = pci_read_config(dev,
3236 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3237 devctl &= ~PCIXM_COMMAND_ERO;
3238 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3239 devctl &= ~PCIXM_COMMAND_MAX_READ;
3240 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3241 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3242 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3243 PCIXM_COMMAND_MAX_READ);
3244 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3245 }
3246 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3247 devctl, 2);
3248 }
 3249	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
3250 if (BGE_IS_5714_FAMILY(sc)) {
3251 /* This chip disables MSI on reset. */
3252 if (sc->bge_flags & BGE_FLAG_MSI) {
3253 val = pci_read_config(dev,
3254 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3255 pci_write_config(dev,
3256 sc->bge_msicap + PCIR_MSI_CTRL,
3257 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3258 val = CSR_READ_4(sc, BGE_MSI_MODE);
3259 CSR_WRITE_4(sc, BGE_MSI_MODE,
3260 val | BGE_MSIMODE_ENABLE);
3261 }
3262 val = CSR_READ_4(sc, BGE_MARB_MODE);
3263 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3264 } else
3265 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3266
3267 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3268 for (i = 0; i < BGE_TIMEOUT; i++) {
3269 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3270 if (val & BGE_VCPU_STATUS_INIT_DONE)
3271 break;
3272 DELAY(100);
3273 }
3274 if (i == BGE_TIMEOUT) {
3275 device_printf(dev, "reset timed out\n");
3276 return (1);
3277 }
3278 } else {
3279 /*
3280 * Poll until we see the 1's complement of the magic number.
3281 * This indicates that the firmware initialization is complete.
3282 * We expect this to fail if no chip containing the Ethernet
 3283		 * address is fitted, though.
3284 */
3285 for (i = 0; i < BGE_TIMEOUT; i++) {
3286 DELAY(10);
3287 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3288 if (val == ~BGE_MAGIC_NUMBER)
3289 break;
3290 }
3291
3292 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3293 device_printf(dev,
3294 "firmware handshake timed out, found 0x%08x\n",
3295 val);
3296 }
3297
3298 /*
3299 * XXX Wait for the value of the PCISTATE register to
3300 * return to its original pre-reset state. This is a
3301 * fairly good indicator of reset completion. If we don't
3302 * wait for the reset to fully complete, trying to read
3303 * from the device's non-PCI registers may yield garbage
3304 * results.
3305 */
3306 for (i = 0; i < BGE_TIMEOUT; i++) {
3307 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3308 break;
3309 DELAY(10);
3310 }
3311
3312 /* Fix up byte swapping. */
3313 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3314 BGE_MODECTL_BYTESWAP_DATA);
3315
3316 /* Tell the ASF firmware we are up */
3317 if (sc->bge_asf_mode & ASF_STACKUP)
3318 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3319
3320 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3321
3322 /*
3323 * The 5704 in TBI mode apparently needs some special
 3324	 * adjustment to ensure the SERDES drive level is set
3325 * to 1.2V.
3326 */
3327 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3328 sc->bge_flags & BGE_FLAG_TBI) {
3329 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3330 val = (val & ~0xFFF) | 0x880;
3331 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3332 }
3333
3334 /* XXX: Broadcom Linux driver. */
3335 if (sc->bge_flags & BGE_FLAG_PCIE &&
3336 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3337 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3338 /* Enable Data FIFO protection. */
3339 val = CSR_READ_4(sc, 0x7C00);
3340 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3341 }
3342 DELAY(10000);
3343
3344 return (0);
3345}
3346
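/*
 * Re-post the mbuf that is already attached to standard ring slot i by
 * rebuilding its descriptor; used when a received frame is dropped or a
 * replacement buffer cannot be allocated.
 */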
3347static __inline void
3348bge_rxreuse_std(struct bge_softc *sc, int i)
3349{
3350 struct bge_rx_bd *r;
3351
3352 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3353 r->bge_flags = BGE_RXBDFLAG_END;
3354 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3355 r->bge_idx = i;
3356 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3357}
3358
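/*
 * Same as bge_rxreuse_std(), but for the extended descriptors of the
 * jumbo receive ring.
 */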
3359static __inline void
3360bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3361{
3362 struct bge_extrx_bd *r;
3363
3364 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3365 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3366 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3367 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3368 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3369 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3370 r->bge_idx = i;
3371 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3372}
3373
3374/*
3375 * Frame reception handling. This is called if there's a frame
3376 * on the receive return list.
3377 *
3378 * Note: we have to be able to handle two possibilities here:
3379 * 1) the frame is from the jumbo receive ring
3380 * 2) the frame is from the standard receive ring
3381 */
3382
3383static int
3384bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3385{
3386 struct ifnet *ifp;
3387 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3388 uint16_t rx_cons;
3389
3390 rx_cons = sc->bge_rx_saved_considx;
3391
3392 /* Nothing to do. */
3393 if (rx_cons == rx_prod)
3394 return (rx_npkts);
3395
3396 ifp = sc->bge_ifp;
3397
3398 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3399 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3400 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3401 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
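	/* Only sync the jumbo ring if the MTU requires jumbo buffers. */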
3402 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3403 (MCLBYTES - ETHER_ALIGN))
3404 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3405 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3406
3407 while (rx_cons != rx_prod) {
3408 struct bge_rx_bd *cur_rx;
3409 uint32_t rxidx;
3410 struct mbuf *m = NULL;
3411 uint16_t vlan_tag = 0;
3412 int have_tag = 0;
3413
3414#ifdef DEVICE_POLLING
3415 if (ifp->if_capenable & IFCAP_POLLING) {
3416 if (sc->rxcycles <= 0)
3417 break;
3418 sc->rxcycles--;
3419 }
3420#endif
3421
3422 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3423
3424 rxidx = cur_rx->bge_idx;
3425 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3426
3427 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3428 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3429 have_tag = 1;
3430 vlan_tag = cur_rx->bge_vlan_tag;
3431 }
3432
3433 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3434 jumbocnt++;
3435 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3436 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3437 bge_rxreuse_jumbo(sc, rxidx);
3438 continue;
3439 }
3440 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3441 bge_rxreuse_jumbo(sc, rxidx);
3442 ifp->if_iqdrops++;
3443 continue;
3444 }
3445 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3446 } else {
3447 stdcnt++;
3448 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3449 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3450 bge_rxreuse_std(sc, rxidx);
3451 continue;
3452 }
3453 if (bge_newbuf_std(sc, rxidx) != 0) {
3454 bge_rxreuse_std(sc, rxidx);
3455 ifp->if_iqdrops++;
3456 continue;
3457 }
3458 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3459 }
3460
3461 ifp->if_ipackets++;
3462#ifndef __NO_STRICT_ALIGNMENT
3463 /*
3464 * For architectures with strict alignment we must make sure
3465 * the payload is aligned.
3466 */
3467 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3468 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3469 cur_rx->bge_len);
3470 m->m_data += ETHER_ALIGN;
3471 }
3472#endif
3473 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3474 m->m_pkthdr.rcvif = ifp;
3475
3476 if (ifp->if_capenable & IFCAP_RXCSUM) {
3477 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3478 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3479 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3480 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3481 }
3482 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3483 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3484 m->m_pkthdr.csum_data =
3485 cur_rx->bge_tcp_udp_csum;
3486 m->m_pkthdr.csum_flags |=
3487 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
3488 }
3489 }
3490
3491 /*
3492 * If we received a packet with a vlan tag,
3493 * attach that information to the packet.
3494 */
3495 if (have_tag) {
3496#if __FreeBSD_version > 700022
3497 m->m_pkthdr.ether_vtag = vlan_tag;
3498 m->m_flags |= M_VLANTAG;
3499#else
3500 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3501 if (m == NULL)
3502 continue;
3503#endif
3504 }
3505
3506 if (holdlck != 0) {
3507 BGE_UNLOCK(sc);
3508 (*ifp->if_input)(ifp, m);
3509 BGE_LOCK(sc);
3510 } else
3511 (*ifp->if_input)(ifp, m);
3512 rx_npkts++;
3513
3514 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3515 return (rx_npkts);
3516 }
3517
3518 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3519 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3520 if (stdcnt > 0)
3521 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3522 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3523
3524 if (jumbocnt > 0)
3525 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3526 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3527
3528 sc->bge_rx_saved_considx = rx_cons;
3529 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3530 if (stdcnt)
3531 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3532 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3533 if (jumbocnt)
3534 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3535 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3536#ifdef notyet
3537 /*
3538 * This register wraps very quickly under heavy packet drops.
3539 * If you need correct statistics, you can enable this check.
3540 */
3541 if (BGE_IS_5705_PLUS(sc))
3542 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3543#endif
3544 return (rx_npkts);
3545}
3546
3547static void
3548bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3549{
3550 struct bge_tx_bd *cur_tx;
3551 struct ifnet *ifp;
3552
3553 BGE_LOCK_ASSERT(sc);
3554
3555 /* Nothing to do. */
3556 if (sc->bge_tx_saved_considx == tx_cons)
3557 return;
3558
3559 ifp = sc->bge_ifp;
3560
3561 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3562 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3563 /*
3564 * Go through our tx ring and free mbufs for those
3565 * frames that have been sent.
3566 */
3567 while (sc->bge_tx_saved_considx != tx_cons) {
3568 uint32_t idx;
3569
3570 idx = sc->bge_tx_saved_considx;
3571 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3572 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3573 ifp->if_opackets++;
3574 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3575 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3576 sc->bge_cdata.bge_tx_dmamap[idx],
3577 BUS_DMASYNC_POSTWRITE);
3578 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3579 sc->bge_cdata.bge_tx_dmamap[idx]);
3580 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3581 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3582 }
3583 sc->bge_txcnt--;
3584 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3585 }
3586
3587 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3588 if (sc->bge_txcnt == 0)
3589 sc->bge_timer = 0;
3590}
3591
3592#ifdef DEVICE_POLLING
3593static int
3594bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3595{
3596 struct bge_softc *sc = ifp->if_softc;
3597 uint16_t rx_prod, tx_cons;
3598 uint32_t statusword;
3599 int rx_npkts = 0;
3600
3601 BGE_LOCK(sc);
3602 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3603 BGE_UNLOCK(sc);
3604 return (rx_npkts);
3605 }
3606
3607 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3608 sc->bge_cdata.bge_status_map,
3609 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3610 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3611 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3612
3613 statusword = sc->bge_ldata.bge_status_block->bge_status;
3614 sc->bge_ldata.bge_status_block->bge_status = 0;
3615
3616 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3617 sc->bge_cdata.bge_status_map,
3618 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3619
3620 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3621 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3622 sc->bge_link_evt++;
3623
3624 if (cmd == POLL_AND_CHECK_STATUS)
3625 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3626 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3627 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3628 bge_link_upd(sc);
3629
3630 sc->rxcycles = count;
3631 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3632 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3633 BGE_UNLOCK(sc);
3634 return (rx_npkts);
3635 }
3636 bge_txeof(sc, tx_cons);
3637 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3638 bge_start_locked(ifp);
3639
3640 BGE_UNLOCK(sc);
3641 return (rx_npkts);
3642}
3643#endif /* DEVICE_POLLING */
3644
3645static int
3646bge_msi_intr(void *arg)
3647{
3648 struct bge_softc *sc;
3649
3650 sc = (struct bge_softc *)arg;
3651 /*
 3652	 * This interrupt is not shared, and the controller has
 3653	 * already disabled further interrupts.
3654 */
3655 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3656 return (FILTER_HANDLED);
3657}
3658
3659static void
3660bge_intr_task(void *arg, int pending)
3661{
3662 struct bge_softc *sc;
3663 struct ifnet *ifp;
3664 uint32_t status;
3665 uint16_t rx_prod, tx_cons;
3666
3667 sc = (struct bge_softc *)arg;
3668 ifp = sc->bge_ifp;
3669
3670 BGE_LOCK(sc);
3671 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3672 BGE_UNLOCK(sc);
3673 return;
3674 }
3675
3676 /* Get updated status block. */
3677 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3678 sc->bge_cdata.bge_status_map,
3679 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3680
 3681	/* Save producer/consumer indexes. */
3682 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3683 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3684 status = sc->bge_ldata.bge_status_block->bge_status;
3685 sc->bge_ldata.bge_status_block->bge_status = 0;
3686 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3687 sc->bge_cdata.bge_status_map,
3688 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3689
3690 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
3691 bge_link_upd(sc);
3692
3693 /* Let controller work. */
3694 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3695
3696 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3697 sc->bge_rx_saved_considx != rx_prod) {
3698 /* Check RX return ring producer/consumer. */
3699 BGE_UNLOCK(sc);
3700 bge_rxeof(sc, rx_prod, 0);
3701 BGE_LOCK(sc);
3702 }
3703 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3704 /* Check TX ring producer/consumer. */
3705 bge_txeof(sc, tx_cons);
3706 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3707 bge_start_locked(ifp);
3708 }
3709 BGE_UNLOCK(sc);
3710}
3711
3712static void
3713bge_intr(void *xsc)
3714{
3715 struct bge_softc *sc;
3716 struct ifnet *ifp;
3717 uint32_t statusword;
3718 uint16_t rx_prod, tx_cons;
3719
3720 sc = xsc;
3721
3722 BGE_LOCK(sc);
3723
3724 ifp = sc->bge_ifp;
3725
3726#ifdef DEVICE_POLLING
3727 if (ifp->if_capenable & IFCAP_POLLING) {
3728 BGE_UNLOCK(sc);
3729 return;
3730 }
3731#endif
3732
3733 /*
3734 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3735 * disable interrupts by writing nonzero like we used to, since with
3736 * our current organization this just gives complications and
3737 * pessimizations for re-enabling interrupts. We used to have races
3738 * instead of the necessary complications. Disabling interrupts
3739 * would just reduce the chance of a status update while we are
3740 * running (by switching to the interrupt-mode coalescence
3741 * parameters), but this chance is already very low so it is more
3742 * efficient to get another interrupt than prevent it.
3743 *
3744 * We do the ack first to ensure another interrupt if there is a
3745 * status update after the ack. We don't check for the status
3746 * changing later because it is more efficient to get another
3747 * interrupt than prevent it, not quite as above (not checking is
3748 * a smaller optimization than not toggling the interrupt enable,
 3749	 * since checking doesn't involve PCI accesses and toggling requires
3750 * the status check). So toggling would probably be a pessimization
3751 * even with MSI. It would only be needed for using a task queue.
3752 */
3753 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3754
3755 /*
3756 * Do the mandatory PCI flush as well as get the link status.
3757 */
3758 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3759
3760 /* Make sure the descriptor ring indexes are coherent. */
3761 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3762 sc->bge_cdata.bge_status_map,
3763 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3764 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3765 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3766 sc->bge_ldata.bge_status_block->bge_status = 0;
3767 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3768 sc->bge_cdata.bge_status_map,
3769 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3770
3771 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3772 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3773 statusword || sc->bge_link_evt)
3774 bge_link_upd(sc);
3775
3776 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3777 /* Check RX return ring producer/consumer. */
3778 bge_rxeof(sc, rx_prod, 1);
3779 }
3780
3781 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3782 /* Check TX ring producer/consumer. */
3783 bge_txeof(sc, tx_cons);
3784 }
3785
3786 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3787 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3788 bge_start_locked(ifp);
3789
3790 BGE_UNLOCK(sc);
3791}
3792
3793static void
3794bge_asf_driver_up(struct bge_softc *sc)
3795{
3796 if (sc->bge_asf_mode & ASF_STACKUP) {
 3797		/* Send ASF heartbeat approx. every 2s */
3798 if (sc->bge_asf_count)
3799 sc->bge_asf_count --;
3800 else {
3801 sc->bge_asf_count = 2;
3802 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3803 BGE_FW_DRV_ALIVE);
3804 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3805 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3806 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3807 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3808 }
3809 }
3810}
3811
3812static void
3813bge_tick(void *xsc)
3814{
3815 struct bge_softc *sc = xsc;
3816 struct mii_data *mii = NULL;
3817
3818 BGE_LOCK_ASSERT(sc);
3819
3820 /* Synchronize with possible callout reset/stop. */
3821 if (callout_pending(&sc->bge_stat_ch) ||
3822 !callout_active(&sc->bge_stat_ch))
3823 return;
3824
3825 if (BGE_IS_5705_PLUS(sc))
3826 bge_stats_update_regs(sc);
3827 else
3828 bge_stats_update(sc);
3829
3830 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3831 mii = device_get_softc(sc->bge_miibus);
3832 /*
3833 * Do not touch PHY if we have link up. This could break
3834 * IPMI/ASF mode or produce extra input errors
 3835		 * (extra errors were reported for bcm5701 & bcm5704).
3836 */
3837 if (!sc->bge_link)
3838 mii_tick(mii);
3839 } else {
3840 /*
 3841		 * Since auto-polling can't be used in TBI mode, we poll the
 3842		 * link status manually. Here we register a pending link event
 3843		 * and trigger an interrupt.
3844 */
3845#ifdef DEVICE_POLLING
3846 /* In polling mode we poll link state in bge_poll(). */
3847 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3848#endif
3849 {
3850 sc->bge_link_evt++;
3851 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3852 sc->bge_flags & BGE_FLAG_5788)
3853 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3854 else
3855 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3856 }
3857 }
3858
3859 bge_asf_driver_up(sc);
3860 bge_watchdog(sc);
3861
3862 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3863}
3864
3865static void
3866bge_stats_update_regs(struct bge_softc *sc)
3867{
3868 struct ifnet *ifp;
3869 struct bge_mac_stats *stats;
3870
3871 ifp = sc->bge_ifp;
3872 stats = &sc->bge_mac_stats;
3873
3874 stats->ifHCOutOctets +=
3875 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
3876 stats->etherStatsCollisions +=
3877 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
3878 stats->outXonSent +=
3879 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
3880 stats->outXoffSent +=
3881 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
3882 stats->dot3StatsInternalMacTransmitErrors +=
3883 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
3884 stats->dot3StatsSingleCollisionFrames +=
3885 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
3886 stats->dot3StatsMultipleCollisionFrames +=
3887 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
3888 stats->dot3StatsDeferredTransmissions +=
3889 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
3890 stats->dot3StatsExcessiveCollisions +=
3891 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
3892 stats->dot3StatsLateCollisions +=
3893 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
3894 stats->ifHCOutUcastPkts +=
3895 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
3896 stats->ifHCOutMulticastPkts +=
3897 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
3898 stats->ifHCOutBroadcastPkts +=
3899 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
3900
3901 stats->ifHCInOctets +=
3902 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
3903 stats->etherStatsFragments +=
3904 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
3905 stats->ifHCInUcastPkts +=
3906 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
3907 stats->ifHCInMulticastPkts +=
3908 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
3909 stats->ifHCInBroadcastPkts +=
3910 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
3911 stats->dot3StatsFCSErrors +=
3912 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
3913 stats->dot3StatsAlignmentErrors +=
3914 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
3915 stats->xonPauseFramesReceived +=
3916 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
3917 stats->xoffPauseFramesReceived +=
3918 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
3919 stats->macControlFramesReceived +=
3920 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
3921 stats->xoffStateEntered +=
3922 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
3923 stats->dot3StatsFramesTooLong +=
3924 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
3925 stats->etherStatsJabbers +=
3926 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
3927 stats->etherStatsUndersizePkts +=
3928 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
3929
3930 stats->FramesDroppedDueToFilters +=
3931 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
3932 stats->DmaWriteQueueFull +=
3933 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
3934 stats->DmaWriteHighPriQueueFull +=
3935 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
3936 stats->NoMoreRxBDs +=
3937 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3938 stats->InputDiscards +=
3939 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3940 stats->InputErrors +=
3941 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3942 stats->RecvThresholdHit +=
3943 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
3944
3945 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
3946 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
3947 stats->InputErrors);
3948}
3949
3950static void
3951bge_stats_clear_regs(struct bge_softc *sc)
3952{
3953
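	/*
	 * The statistics registers appear to be clear-on-read here, so
	 * reading each one simply discards any stale counts.
	 */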
3954 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
3955 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
3956 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
3957 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
3958 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
3959 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
3960 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
3961 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
3962 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
3963 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
3964 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
3965 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
3966 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
3967
3968 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
3969 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
3970 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
3971 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
3972 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
3973 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
3974 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
3975 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
3976 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
3977 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
3978 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
3979 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
3980 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
3981 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
3982
3983 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
3984 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
3985 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
3986 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3987 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3988 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3989 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
3990}
3991
3992static void
3993bge_stats_update(struct bge_softc *sc)
3994{
3995 struct ifnet *ifp;
3996 bus_size_t stats;
3997 uint32_t cnt; /* current register value */
3998
3999 ifp = sc->bge_ifp;
4000
4001 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4002
4003#define READ_STAT(sc, stats, stat) \
4004 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4005
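	/*
	 * Each counter in the NIC statistics block is 64 bits wide; only
	 * the low 32 bits are read and deltas are accumulated below.
	 */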
4006 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4007 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4008 sc->bge_tx_collisions = cnt;
4009
4010 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4011 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4012 sc->bge_rx_discards = cnt;
4013
4014 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4015 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4016 sc->bge_tx_discards = cnt;
4017
4018#undef READ_STAT
4019}
4020
4021/*
4022 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4023 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4024 * but when such padded frames employ the bge IP/TCP checksum offload,
4025 * the hardware checksum assist gives incorrect results (possibly
4026 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4027 * If we pad such runts with zeros, the onboard checksum comes out correct.
4028 */
4029static __inline int
4030bge_cksum_pad(struct mbuf *m)
4031{
4032 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4033 struct mbuf *last;
4034
4035 /* If there's only the packet-header and we can pad there, use it. */
4036 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4037 M_TRAILINGSPACE(m) >= padlen) {
4038 last = m;
4039 } else {
4040 /*
4041 * Walk packet chain to find last mbuf. We will either
4042 * pad there, or append a new mbuf and pad it.
4043 */
4044 for (last = m; last->m_next != NULL; last = last->m_next);
4045 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4046 /* Allocate new empty mbuf, pad it. Compact later. */
4047 struct mbuf *n;
4048
4049 MGET(n, M_DONTWAIT, MT_DATA);
4050 if (n == NULL)
4051 return (ENOBUFS);
4052 n->m_len = 0;
4053 last->m_next = n;
4054 last = n;
4055 }
4056 }
4057
4058 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4059 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4060 last->m_len += padlen;
4061 m->m_pkthdr.len += padlen;
4062
4063 return (0);
4064}
4065
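/*
 * Prepare an mbuf for hardware TSO: pull up the Ethernet/IP/TCP headers,
 * zero the IP checksum and TCP pseudo checksum, and encode the MSS plus
 * IP/TCP header length in the format the send BDs expect.
 */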
4066static struct mbuf *
4067bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss)
4068{
4069 struct ip *ip;
4070 struct tcphdr *tcp;
4071 struct mbuf *n;
4072 uint16_t hlen;
4073 uint32_t poff;
4074
4075 if (M_WRITABLE(m) == 0) {
4076 /* Get a writable copy. */
4077 n = m_dup(m, M_DONTWAIT);
4078 m_freem(m);
4079 if (n == NULL)
4080 return (NULL);
4081 m = n;
4082 }
4083 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4084 if (m == NULL)
4085 return (NULL);
4086 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4087 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4088 m = m_pullup(m, poff + sizeof(struct tcphdr));
4089 if (m == NULL)
4090 return (NULL);
4091 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4092 m = m_pullup(m, poff + (tcp->th_off << 2));
4093 if (m == NULL)
4094 return (NULL);
4095 /*
 4096	 * It seems the controller doesn't modify the IP length and TCP pseudo
 4097	 * checksum. These checksums computed by the upper stack should be 0.
4098 */
 4099	*mss = m->m_pkthdr.tso_segsz;
 4100	ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
 4101	ip->ip_sum = 0;
 4102	ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
 4103	/* Clear pseudo checksum computed by TCP stack. */
 4104	tcp = (struct tcphdr *)(mtod(m, char *) + poff);
 4105	tcp->th_sum = 0;
4104 /*
 4105	 * Broadcom controllers use different descriptor formats for
 4106	 * TSO depending on ASIC revision. Due to the TSO-capable firmware
 4107	 * license issue and the lower performance of firmware based TSO,
 4108	 * we only support hardware based TSO, which is applicable to
 4109	 * BCM5755 or newer controllers. Hardware based TSO uses 11
 4110	 * bits to store the MSS and the upper 5 bits are used to store
 4111	 * the IP/TCP header length (including IP/TCP options). The header
 4112	 * length is expressed in 32-bit units.
4113 */
4114 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4115 *mss |= (hlen << 11);
4116 return (m);
4117}
4118
4119/*
4120 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4121 * pointers to descriptors.
4122 */
4123static int
4124bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4125{
4126 bus_dma_segment_t segs[BGE_NSEG_NEW];
4127 bus_dmamap_t map;
4128 struct bge_tx_bd *d;
4129 struct mbuf *m = *m_head;
4130 uint32_t idx = *txidx;
4131 uint16_t csum_flags, mss, vlan_tag;
4132 int nsegs, i, error;
4133
4134 csum_flags = 0;
4135 mss = 0;
4136 vlan_tag = 0;
4137 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4138 *m_head = m = bge_setup_tso(sc, m, &mss);
4139 if (*m_head == NULL)
4140 return (ENOBUFS);
4141 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4142 BGE_TXBDFLAG_CPU_POST_DMA;
4143 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4144 if (m->m_pkthdr.csum_flags & CSUM_IP)
4145 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4146 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4147 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4148 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4149 (error = bge_cksum_pad(m)) != 0) {
4150 m_freem(m);
4151 *m_head = NULL;
4152 return (error);
4153 }
4154 }
4155 if (m->m_flags & M_LASTFRAG)
4156 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4157 else if (m->m_flags & M_FRAG)
4158 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4159 }
4160
4161 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
4162 sc->bge_forced_collapse > 0 &&
4163 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4164 /*
 4165		 * Forcibly collapse mbuf chains to overcome a hardware
 4166		 * limitation which supports only a single outstanding
 4167		 * DMA read operation.
4168 */
4169 if (sc->bge_forced_collapse == 1)
4170 m = m_defrag(m, M_DONTWAIT);
4171 else
4172 m = m_collapse(m, M_DONTWAIT, sc->bge_forced_collapse);
4173 if (m == NULL)
4174 m = *m_head;
4175 *m_head = m;
4176 }
4177
4178 map = sc->bge_cdata.bge_tx_dmamap[idx];
4179 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4180 &nsegs, BUS_DMA_NOWAIT);
4181 if (error == EFBIG) {
4182 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4183 if (m == NULL) {
4184 m_freem(*m_head);
4185 *m_head = NULL;
4186 return (ENOBUFS);
4187 }
4188 *m_head = m;
4189 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4190 m, segs, &nsegs, BUS_DMA_NOWAIT);
4191 if (error) {
4192 m_freem(m);
4193 *m_head = NULL;
4194 return (error);
4195 }
4196 } else if (error != 0)
4197 return (error);
4198
4199 /* Check if we have enough free send BDs. */
4200 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4201 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4202 return (ENOBUFS);
4203 }
4204
4205 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4206
4207#if __FreeBSD_version > 700022
4208 if (m->m_flags & M_VLANTAG) {
4209 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4210 vlan_tag = m->m_pkthdr.ether_vtag;
4211 }
4212#else
4213 {
4214 struct m_tag *mtag;
4215
4216 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
4217 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4218 vlan_tag = VLAN_TAG_VALUE(mtag);
4219 }
4220 }
4221#endif
4222 for (i = 0; ; i++) {
4223 d = &sc->bge_ldata.bge_tx_ring[idx];
4224 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4225 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4226 d->bge_len = segs[i].ds_len;
4227 d->bge_flags = csum_flags;
4228 d->bge_vlan_tag = vlan_tag;
4229 d->bge_mss = mss;
4230 if (i == nsegs - 1)
4231 break;
4232 BGE_INC(idx, BGE_TX_RING_CNT);
4233 }
4234
4235 /* Mark the last segment as end of packet... */
4236 d->bge_flags |= BGE_TXBDFLAG_END;
4237
4238 /*
 4239	 * Ensure that the map for this transmission
4240 * is placed at the array index of the last descriptor
4241 * in this chain.
4242 */
4243 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4244 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4245 sc->bge_cdata.bge_tx_chain[idx] = m;
4246 sc->bge_txcnt += nsegs;
4247
4248 BGE_INC(idx, BGE_TX_RING_CNT);
4249 *txidx = idx;
4250
4251 return (0);
4252}
4253
4254/*
4255 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4256 * to the mbuf data regions directly in the transmit descriptors.
4257 */
4258static void
4259bge_start_locked(struct ifnet *ifp)
4260{
4261 struct bge_softc *sc;
4262 struct mbuf *m_head;
4263 uint32_t prodidx;
4264 int count;
4265
4266 sc = ifp->if_softc;
4267 BGE_LOCK_ASSERT(sc);
4268
4269 if (!sc->bge_link ||
4270 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4271 IFF_DRV_RUNNING)
4272 return;
4273
4274 prodidx = sc->bge_tx_prodidx;
4275
4276 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4277 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4278 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4279 break;
4280 }
4281 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4282 if (m_head == NULL)
4283 break;
4284
4285 /*
4286 * XXX
4287 * The code inside the if() block is never reached since we
4288 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4289 * requests to checksum TCP/UDP in a fragmented packet.
4290 *
4291 * XXX
4292 * safety overkill. If this is a fragmented packet chain
4293 * with delayed TCP/UDP checksums, then only encapsulate
4294 * it if we have enough descriptors to handle the entire
4295 * chain at once.
4296 * (paranoia -- may not actually be needed)
4297 */
4298 if (m_head->m_flags & M_FIRSTFRAG &&
4299 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4300 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4301 m_head->m_pkthdr.csum_data + 16) {
4302 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4303 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4304 break;
4305 }
4306 }
4307
4308 /*
4309 * Pack the data into the transmit ring. If we
4310 * don't have room, set the OACTIVE flag and wait
4311 * for the NIC to drain the ring.
4312 */
4313 if (bge_encap(sc, &m_head, &prodidx)) {
4314 if (m_head == NULL)
4315 break;
4316 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4317 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4318 break;
4319 }
4320 ++count;
4321
4322 /*
4323 * If there's a BPF listener, bounce a copy of this frame
4324 * to him.
4325 */
4326#ifdef ETHER_BPF_MTAP
4327 ETHER_BPF_MTAP(ifp, m_head);
4328#else
4329 BPF_MTAP(ifp, m_head);
4330#endif
4331 }
4332
4333 if (count > 0) {
4334 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4335 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4336 /* Transmit. */
4337 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4338 /* 5700 b2 errata */
4339 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4340 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4341
4342 sc->bge_tx_prodidx = prodidx;
4343
4344 /*
4345 * Set a timeout in case the chip goes out to lunch.
4346 */
4347 sc->bge_timer = 5;
4348 }
4349}
4350
4351/*
4352 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4353 * to the mbuf data regions directly in the transmit descriptors.
4354 */
4355static void
4356bge_start(struct ifnet *ifp)
4357{
4358 struct bge_softc *sc;
4359
4360 sc = ifp->if_softc;
4361 BGE_LOCK(sc);
4362 bge_start_locked(ifp);
4363 BGE_UNLOCK(sc);
4364}
4365
4366static void
4367bge_init_locked(struct bge_softc *sc)
4368{
4369 struct ifnet *ifp;
4370 uint16_t *m;
4371
4372 BGE_LOCK_ASSERT(sc);
4373
4374 ifp = sc->bge_ifp;
4375
4376 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4377 return;
4378
4379 /* Cancel pending I/O and flush buffers. */
4380 bge_stop(sc);
4381
4382 bge_stop_fw(sc);
4383 bge_sig_pre_reset(sc, BGE_RESET_START);
4384 bge_reset(sc);
4385 bge_sig_legacy(sc, BGE_RESET_START);
4386 bge_sig_post_reset(sc, BGE_RESET_START);
4387
4388 bge_chipinit(sc);
4389
4390 /*
4391 * Init the various state machines, ring
4392 * control blocks and firmware.
4393 */
4394 if (bge_blockinit(sc)) {
4395 device_printf(sc->bge_dev, "initialization failure\n");
4396 return;
4397 }
4398
4399 ifp = sc->bge_ifp;
4400
4401 /* Specify MTU. */
4402 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4403 ETHER_HDR_LEN + ETHER_CRC_LEN +
4404 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4405
4406 /* Load our MAC address. */
4407 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4408 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4409 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4410
4411 /* Program promiscuous mode. */
4412 bge_setpromisc(sc);
4413
4414 /* Program multicast filter. */
4415 bge_setmulti(sc);
4416
4417 /* Program VLAN tag stripping. */
4418 bge_setvlan(sc);
4419
4420 /* Override UDP checksum offloading. */
4421 if (sc->bge_forced_udpcsum == 0)
4422 sc->bge_csum_features &= ~CSUM_UDP;
4423 else
4424 sc->bge_csum_features |= CSUM_UDP;
4425 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4426 ifp->if_capenable & IFCAP_TXCSUM) {
4427 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4428 ifp->if_hwassist |= sc->bge_csum_features;
4429 }
4430
4431 /* Init RX ring. */
4432 if (bge_init_rx_ring_std(sc) != 0) {
4433 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4434 bge_stop(sc);
4435 return;
4436 }
4437
4438 /*
4439 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
 4440	 * memory to ensure that the chip has in fact read the first
4441 * entry of the ring.
4442 */
4443 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4444 uint32_t v, i;
4445 for (i = 0; i < 10; i++) {
4446 DELAY(20);
4447 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4448 if (v == (MCLBYTES - ETHER_ALIGN))
4449 break;
4450 }
4451 if (i == 10)
4452 device_printf (sc->bge_dev,
4453 "5705 A0 chip failed to load RX ring\n");
4454 }
4455
4456 /* Init jumbo RX ring. */
4457 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4458 (MCLBYTES - ETHER_ALIGN)) {
4459 if (bge_init_rx_ring_jumbo(sc) != 0) {
4460 device_printf(sc->bge_dev,
4461 "no memory for jumbo Rx buffers.\n");
4462 bge_stop(sc);
4463 return;
4464 }
4465 }
4466
4467 /* Init our RX return ring index. */
4468 sc->bge_rx_saved_considx = 0;
4469
4470 /* Init our RX/TX stat counters. */
4471 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4472
4473 /* Init TX ring. */
4474 bge_init_tx_ring(sc);
4475
4476 /* Turn on transmitter. */
4477 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
4478
4479 /* Turn on receiver. */
4480 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4481
4482 /*
4483 * Set the number of good frames to receive after RX MBUF
4484 * Low Watermark has been reached. After the RX MAC receives
4485 * this number of frames, it will drop subsequent incoming
4486 * frames until the MBUF High Watermark is reached.
4487 */
4488 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4489
4490 /* Clear MAC statistics. */
4491 if (BGE_IS_5705_PLUS(sc))
4492 bge_stats_clear_regs(sc);
4493
4494 /* Tell firmware we're alive. */
4495 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4496
4497#ifdef DEVICE_POLLING
4498 /* Disable interrupts if we are polling. */
4499 if (ifp->if_capenable & IFCAP_POLLING) {
4500 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4501 BGE_PCIMISCCTL_MASK_PCI_INTR);
4502 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4503 } else
4504#endif
4505
4506 /* Enable host interrupts. */
4507 {
4508 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4509 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4510 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4511 }
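	/*
	 * Note that the IRQ0 mailbox effectively doubles as the interrupt
	 * gate in this driver: it is written non-zero wherever interrupts
	 * are masked (polling mode, bge_stop()) and zero when re-armed.
	 */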
4512
4513 bge_ifmedia_upd_locked(ifp);
4514
4515 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4516 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4517
4518 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4519}
4520
4521static void
4522bge_init(void *xsc)
4523{
4524 struct bge_softc *sc = xsc;
4525
4526 BGE_LOCK(sc);
4527 bge_init_locked(sc);
4528 BGE_UNLOCK(sc);
4529}
4530
4531/*
4532 * Set media options.
4533 */
4534static int
4535bge_ifmedia_upd(struct ifnet *ifp)
4536{
4537 struct bge_softc *sc = ifp->if_softc;
4538 int res;
4539
4540 BGE_LOCK(sc);
4541 res = bge_ifmedia_upd_locked(ifp);
4542 BGE_UNLOCK(sc);
4543
4544 return (res);
4545}
4546
4547static int
4548bge_ifmedia_upd_locked(struct ifnet *ifp)
4549{
4550 struct bge_softc *sc = ifp->if_softc;
4551 struct mii_data *mii;
4552 struct mii_softc *miisc;
4553 struct ifmedia *ifm;
4554
4555 BGE_LOCK_ASSERT(sc);
4556
4557 ifm = &sc->bge_ifmedia;
4558
4559 /* If this is a 1000baseX NIC, enable the TBI port. */
4560 if (sc->bge_flags & BGE_FLAG_TBI) {
4561 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4562 return (EINVAL);
4563 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4564 case IFM_AUTO:
4565 /*
4566 * The BCM5704 ASIC appears to have a special
4567 * mechanism for programming the autoneg
4568 * advertisement registers in TBI mode.
4569 */
4570 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4571 uint32_t sgdig;
4572 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4573 if (sgdig & BGE_SGDIGSTS_DONE) {
4574 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4575 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4576 sgdig |= BGE_SGDIGCFG_AUTO |
4577 BGE_SGDIGCFG_PAUSE_CAP |
4578 BGE_SGDIGCFG_ASYM_PAUSE;
4579 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4580 sgdig | BGE_SGDIGCFG_SEND);
4581 DELAY(5);
4582 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4583 }
4584 }
4585 break;
4586 case IFM_1000_SX:
4587 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4588 BGE_CLRBIT(sc, BGE_MAC_MODE,
4589 BGE_MACMODE_HALF_DUPLEX);
4590 } else {
4591 BGE_SETBIT(sc, BGE_MAC_MODE,
4592 BGE_MACMODE_HALF_DUPLEX);
4593 }
4594 break;
4595 default:
4596 return (EINVAL);
4597 }
4598 return (0);
4599 }
4600
4601 sc->bge_link_evt++;
4602 mii = device_get_softc(sc->bge_miibus);
4603 if (mii->mii_instance)
4604 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4605 mii_phy_reset(miisc);
4606 mii_mediachg(mii);
4607
4608 /*
4609 * Force an interrupt so that we will call bge_link_upd
4610 * if needed and clear any pending link state attention.
4611 * Without this we are not getting any further interrupts
4612 * for link state changes and thus will not UP the link and
4613 * not be able to send in bge_start_locked. The only
4614 * way to get things working was to receive a packet and
4615 * get an RX intr.
4616 * bge_tick should help for fiber cards and we might not
4617 * need to do this here if BGE_FLAG_TBI is set but as
4618 * we poll for fiber anyway it should not harm.
4619 */
4620 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4621 sc->bge_flags & BGE_FLAG_5788)
4622 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4623 else
4624 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4625
4626 return (0);
4627}
4628
4629/*
4630 * Report current media status.
4631 */
4632static void
4633bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4634{
4635 struct bge_softc *sc = ifp->if_softc;
4636 struct mii_data *mii;
4637
4638 BGE_LOCK(sc);
4639
4640 if (sc->bge_flags & BGE_FLAG_TBI) {
4641 ifmr->ifm_status = IFM_AVALID;
4642 ifmr->ifm_active = IFM_ETHER;
4643 if (CSR_READ_4(sc, BGE_MAC_STS) &
4644 BGE_MACSTAT_TBI_PCS_SYNCHED)
4645 ifmr->ifm_status |= IFM_ACTIVE;
4646 else {
4647 ifmr->ifm_active |= IFM_NONE;
4648 BGE_UNLOCK(sc);
4649 return;
4650 }
4651 ifmr->ifm_active |= IFM_1000_SX;
4652 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4653 ifmr->ifm_active |= IFM_HDX;
4654 else
4655 ifmr->ifm_active |= IFM_FDX;
4656 BGE_UNLOCK(sc);
4657 return;
4658 }
4659
4660 mii = device_get_softc(sc->bge_miibus);
4661 mii_pollstat(mii);
4662 ifmr->ifm_active = mii->mii_media_active;
4663 ifmr->ifm_status = mii->mii_media_status;
4664
4665 BGE_UNLOCK(sc);
4666}
4667
4668static int
4669bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4670{
4671 struct bge_softc *sc = ifp->if_softc;
4672 struct ifreq *ifr = (struct ifreq *) data;
4673 struct mii_data *mii;
4674 int flags, mask, error = 0;
4675
4676 switch (command) {
4677 case SIOCSIFMTU:
4678 BGE_LOCK(sc);
4679 if (ifr->ifr_mtu < ETHERMIN ||
4680 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4681 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4682 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4683 ifr->ifr_mtu > ETHERMTU))
4684 error = EINVAL;
4685 else if (ifp->if_mtu != ifr->ifr_mtu) {
4686 ifp->if_mtu = ifr->ifr_mtu;
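			/*
			 * The new MTU only takes effect after a reinit, which
			 * reprograms BGE_RX_MTU and re-selects the standard or
			 * jumbo RX ring in bge_init_locked().
			 */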
4687 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4688 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4689 bge_init_locked(sc);
4690 }
4691 }
4692 BGE_UNLOCK(sc);
4693 break;
4694 case SIOCSIFFLAGS:
4695 BGE_LOCK(sc);
4696 if (ifp->if_flags & IFF_UP) {
4697 /*
4698 * If only the state of the PROMISC flag changed,
4699 * then just use the 'set promisc mode' command
4700 * instead of reinitializing the entire NIC. Doing
4701 * a full re-init means reloading the firmware and
4702 * waiting for it to start up, which may take a
4703 * second or two. Similarly for ALLMULTI.
4704 */
4705 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4706 flags = ifp->if_flags ^ sc->bge_if_flags;
4707 if (flags & IFF_PROMISC)
4708 bge_setpromisc(sc);
4709 if (flags & IFF_ALLMULTI)
4710 bge_setmulti(sc);
4711 } else
4712 bge_init_locked(sc);
4713 } else {
4714 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4715 bge_stop(sc);
4716 }
4717 }
4718 sc->bge_if_flags = ifp->if_flags;
4719 BGE_UNLOCK(sc);
4720 error = 0;
4721 break;
4722 case SIOCADDMULTI:
4723 case SIOCDELMULTI:
4724 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4725 BGE_LOCK(sc);
4726 bge_setmulti(sc);
4727 BGE_UNLOCK(sc);
4728 error = 0;
4729 }
4730 break;
4731 case SIOCSIFMEDIA:
4732 case SIOCGIFMEDIA:
4733 if (sc->bge_flags & BGE_FLAG_TBI) {
4734 error = ifmedia_ioctl(ifp, ifr,
4735 &sc->bge_ifmedia, command);
4736 } else {
4737 mii = device_get_softc(sc->bge_miibus);
4738 error = ifmedia_ioctl(ifp, ifr,
4739 &mii->mii_media, command);
4740 }
4741 break;
4742 case SIOCSIFCAP:
4743 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4744#ifdef DEVICE_POLLING
4745 if (mask & IFCAP_POLLING) {
4746 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4747 error = ether_poll_register(bge_poll, ifp);
4748 if (error)
4749 return (error);
4750 BGE_LOCK(sc);
4751 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4752 BGE_PCIMISCCTL_MASK_PCI_INTR);
4753 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4754 ifp->if_capenable |= IFCAP_POLLING;
4755 BGE_UNLOCK(sc);
4756 } else {
4757 error = ether_poll_deregister(ifp);
4758 /* Enable interrupt even in error case */
4759 BGE_LOCK(sc);
4760 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
4761 BGE_PCIMISCCTL_MASK_PCI_INTR);
4762 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4763 ifp->if_capenable &= ~IFCAP_POLLING;
4764 BGE_UNLOCK(sc);
4765 }
4766 }
4767#endif
4768 if ((mask & IFCAP_TXCSUM) != 0 &&
4769 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
4770 ifp->if_capenable ^= IFCAP_TXCSUM;
4771 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
4772 ifp->if_hwassist |= sc->bge_csum_features;
4773 else
4774 ifp->if_hwassist &= ~sc->bge_csum_features;
4775 }
4776
4777 if ((mask & IFCAP_RXCSUM) != 0 &&
4778 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
4779 ifp->if_capenable ^= IFCAP_RXCSUM;
4780
4781 if ((mask & IFCAP_TSO4) != 0 &&
4782 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
4783 ifp->if_capenable ^= IFCAP_TSO4;
4784 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
4785 ifp->if_hwassist |= CSUM_TSO;
4786 else
4787 ifp->if_hwassist &= ~CSUM_TSO;
4788 }
4789
4790 if (mask & IFCAP_VLAN_MTU) {
4791 ifp->if_capenable ^= IFCAP_VLAN_MTU;
4792 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4793 bge_init(sc);
4794 }
4795
4796 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
4797 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
4798 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4799 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
4800 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
4801 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4802 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
4803 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
4804 BGE_LOCK(sc);
4805 bge_setvlan(sc);
4806 BGE_UNLOCK(sc);
4807 }
4808#ifdef VLAN_CAPABILITIES
4809 VLAN_CAPABILITIES(ifp);
4810#endif
4811 break;
4812 default:
4813 error = ether_ioctl(ifp, command, data);
4814 break;
4815 }
4816
4817 return (error);
4818}
4819
4820static void
4821bge_watchdog(struct bge_softc *sc)
4822{
4823 struct ifnet *ifp;
4824
4825 BGE_LOCK_ASSERT(sc);
4826
4827 if (sc->bge_timer == 0 || --sc->bge_timer)
4828 return;
4829
4830 ifp = sc->bge_ifp;
4831
4832 if_printf(ifp, "watchdog timeout -- resetting\n");
4833
4834 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4835 bge_init_locked(sc);
4836
4837 ifp->if_oerrors++;
4838}
4839
4840/*
4841 * Stop the adapter and free any mbufs allocated to the
4842 * RX and TX lists.
4843 */
4844static void
4845bge_stop(struct bge_softc *sc)
4846{
4847 struct ifnet *ifp;
4848
4849 BGE_LOCK_ASSERT(sc);
4850
4851 ifp = sc->bge_ifp;
4852
4853 callout_stop(&sc->bge_stat_ch);
4854
4855 /* Disable host interrupts. */
4856 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4857 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4858
4859 /*
4860 * Tell firmware we're shutting down.
4861 */
4862 bge_stop_fw(sc);
4863 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4864
4865 /*
4866 * Disable all of the receiver blocks.
4867 */
4868 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4869 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4870 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4871 if (!(BGE_IS_5705_PLUS(sc)))
4872 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4873 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4874 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4875 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4876
4877 /*
4878 * Disable all of the transmit blocks.
4879 */
4880 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4881 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4882 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4883 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4884 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4885 if (!(BGE_IS_5705_PLUS(sc)))
4886 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4887 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4888
4889 /*
4890 * Shut down all of the memory managers and related
4891 * state machines.
4892 */
4893 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4894 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4895 if (!(BGE_IS_5705_PLUS(sc)))
4896 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4897 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4898 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4899 if (!(BGE_IS_5705_PLUS(sc))) {
4900 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4901 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4902 }
4903 /* Update MAC statistics. */
4904 if (BGE_IS_5705_PLUS(sc))
4905 bge_stats_update_regs(sc);
4906
4907 bge_reset(sc);
4908 bge_sig_legacy(sc, BGE_RESET_STOP);
4909 bge_sig_post_reset(sc, BGE_RESET_STOP);
4910
4911 /*
4912 * Keep the ASF firmware running if up.
4913 */
4914 if (sc->bge_asf_mode & ASF_STACKUP)
4915 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4916 else
4917 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4918
4919 /* Free the RX lists. */
4920 bge_free_rx_ring_std(sc);
4921
4922 /* Free jumbo RX list. */
4923 if (BGE_IS_JUMBO_CAPABLE(sc))
4924 bge_free_rx_ring_jumbo(sc);
4925
4926 /* Free TX buffers. */
4927 bge_free_tx_ring(sc);
4928
4929 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4930
4931 /* Clear MAC's link state (PHY may still have link UP). */
4932 if (bootverbose && sc->bge_link)
4933 if_printf(sc->bge_ifp, "link DOWN\n");
4934 sc->bge_link = 0;
4935
4936 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4937}
4938
4939/*
4940 * Stop all chip I/O so that the kernel's probe routines don't
4941 * get confused by errant DMAs when rebooting.
4942 */
4943static int
4944bge_shutdown(device_t dev)
4945{
4946 struct bge_softc *sc;
4947
4948 sc = device_get_softc(dev);
4949 BGE_LOCK(sc);
4950 bge_stop(sc);
4951 bge_reset(sc);
4952 BGE_UNLOCK(sc);
4953
4954 return (0);
4955}
4956
4957static int
4958bge_suspend(device_t dev)
4959{
4960 struct bge_softc *sc;
4961
4962 sc = device_get_softc(dev);
4963 BGE_LOCK(sc);
4964 bge_stop(sc);
4965 BGE_UNLOCK(sc);
4966
4967 return (0);
4968}
4969
4970static int
4971bge_resume(device_t dev)
4972{
4973 struct bge_softc *sc;
4974 struct ifnet *ifp;
4975
4976 sc = device_get_softc(dev);
4977 BGE_LOCK(sc);
4978 ifp = sc->bge_ifp;
4979 if (ifp->if_flags & IFF_UP) {
4980 bge_init_locked(sc);
4981 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4982 bge_start_locked(ifp);
4983 }
4984 BGE_UNLOCK(sc);
4985
4986 return (0);
4987}
4988
4989static void
4990bge_link_upd(struct bge_softc *sc)
4991{
4992 struct mii_data *mii;
4993 uint32_t link, status;
4994
4995 BGE_LOCK_ASSERT(sc);
4996
4997 /* Clear 'pending link event' flag. */
4998 sc->bge_link_evt = 0;
4999
5000 /*
5001 * Process link state changes.
5002 * Grrr. The link status word in the status block does
5003 * not work correctly on the BCM5700 rev AX and BX chips,
5004 * according to all available information. Hence, we have
5005 * to enable MII interrupts in order to properly obtain
5006 * async link changes. Unfortunately, this also means that
5007 * we have to read the MAC status register to detect link
5008 * changes, thereby adding an additional register access to
5009 * the interrupt handler.
5010 *
5011 * XXX: perhaps link state detection procedure used for
5012	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5013 */
5014
5015 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5016 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5017 status = CSR_READ_4(sc, BGE_MAC_STS);
5018 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5019 mii = device_get_softc(sc->bge_miibus);
5020 mii_pollstat(mii);
5021 if (!sc->bge_link &&
5022 mii->mii_media_status & IFM_ACTIVE &&
5023 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5024 sc->bge_link++;
5025 if (bootverbose)
5026 if_printf(sc->bge_ifp, "link UP\n");
5027 } else if (sc->bge_link &&
5028 (!(mii->mii_media_status & IFM_ACTIVE) ||
5029 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5030 sc->bge_link = 0;
5031 if (bootverbose)
5032 if_printf(sc->bge_ifp, "link DOWN\n");
5033 }
5034
5035 /* Clear the interrupt. */
5036 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5037 BGE_EVTENB_MI_INTERRUPT);
5038 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5039 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5040 BRGPHY_INTRS);
5041 }
5042 return;
5043 }
5044
5045 if (sc->bge_flags & BGE_FLAG_TBI) {
5046 status = CSR_READ_4(sc, BGE_MAC_STS);
5047 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5048 if (!sc->bge_link) {
5049 sc->bge_link++;
5050 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5051 BGE_CLRBIT(sc, BGE_MAC_MODE,
5052 BGE_MACMODE_TBI_SEND_CFGS);
5053 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5054 if (bootverbose)
5055 if_printf(sc->bge_ifp, "link UP\n");
5056 if_link_state_change(sc->bge_ifp,
5057 LINK_STATE_UP);
5058 }
5059 } else if (sc->bge_link) {
5060 sc->bge_link = 0;
5061 if (bootverbose)
5062 if_printf(sc->bge_ifp, "link DOWN\n");
5063 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5064 }
5065 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5066 /*
5067	 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED bit
5068	 * in the status word always set. Work around this bug by reading
5069	 * the PHY link status directly.
5070 */
5071 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5072
5073 if (link != sc->bge_link ||
5074 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5075 mii = device_get_softc(sc->bge_miibus);
5076 mii_pollstat(mii);
5077 if (!sc->bge_link &&
5078 mii->mii_media_status & IFM_ACTIVE &&
5079 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5080 sc->bge_link++;
5081 if (bootverbose)
5082 if_printf(sc->bge_ifp, "link UP\n");
5083 } else if (sc->bge_link &&
5084 (!(mii->mii_media_status & IFM_ACTIVE) ||
5085 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5086 sc->bge_link = 0;
5087 if (bootverbose)
5088 if_printf(sc->bge_ifp, "link DOWN\n");
5089 }
5090 }
5091 } else {
5092 /*
5093 * For controllers that call mii_tick, we have to poll
5094 * link status.
5095 */
5096 mii = device_get_softc(sc->bge_miibus);
5097 mii_pollstat(mii);
5098 bge_miibus_statchg(sc->bge_dev);
5099 }
5100
5101 /* Clear the attention. */
5102 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5103 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5104 BGE_MACSTAT_LINK_CHANGED);
5105}
5106
5107static void
5108bge_add_sysctls(struct bge_softc *sc)
5109{
5110 struct sysctl_ctx_list *ctx;
5111 struct sysctl_oid_list *children;
5112 char tn[32];
5113 int unit;
5114
5115 ctx = device_get_sysctl_ctx(sc->bge_dev);
5116 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5117
5118#ifdef BGE_REGISTER_DEBUG
5119 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5120 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5121 "Debug Information");
5122
5123 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5124 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5125 "Register Read");
5126
5127 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5128 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5129 "Memory Read");
5130
5131#endif
5132
5133 unit = device_get_unit(sc->bge_dev);
5134 /*
5135 * A common design characteristic for many Broadcom client controllers
5136 * is that they only support a single outstanding DMA read operation
5137 * on the PCIe bus. This means that it will take twice as long to fetch
5138 * a TX frame that is split into header and payload buffers as it does
5139 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5140 * these controllers, coalescing buffers to reduce the number of memory
5141	 * reads is an effective way to get maximum performance (about 940 Mbps).
5142	 * Without collapsing TX buffers the maximum TCP bulk transfer
5143	 * performance is about 850 Mbps. However, forcing mbuf coalescing
5144 * consumes a lot of CPU cycles, so leave it off by default.
5145 */
5146 sc->bge_forced_collapse = 0;
5147 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5148 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
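	/*
	 * Fetching the loader tunable of the same name lets the value be
	 * preset from loader.conf at boot; the sysctl added below then
	 * exposes it read-write at run time.
	 */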
5149 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5150 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5151 "Number of fragmented TX buffers of a frame allowed before "
5152 "forced collapsing");
5153
5154 /*
5155 * It seems all Broadcom controllers have a bug that can generate UDP
5156 * datagrams with checksum value 0 when TX UDP checksum offloading is
5157	 * enabled. Generating UDP checksum value 0 violates RFC 768.
5158	 * Even though the probability of generating such UDP datagrams is
5159	 * low, I don't want FreeBSD boxes injecting such datagrams into the
5160	 * network, so disable UDP checksum offloading by default. Users can
5161	 * still override this behavior by setting a sysctl variable,
5162 * dev.bge.0.forced_udpcsum.
5163 */
5164 sc->bge_forced_udpcsum = 0;
5165 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5166 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5167 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5168 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5169 "Enable UDP checksum offloading even if controller can "
5170 "generate UDP checksum value 0");
5171
5172 if (BGE_IS_5705_PLUS(sc))
5173 bge_add_sysctl_stats_regs(sc, ctx, children);
5174 else
5175 bge_add_sysctl_stats(sc, ctx, children);
5176}
5177
5178#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5179 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5180 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5181 desc)
5182
5183static void
5184bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5185 struct sysctl_oid_list *parent)
5186{
5187 struct sysctl_oid *tree;
5188 struct sysctl_oid_list *children, *schildren;
5189
5190 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5191 NULL, "BGE Statistics");
5192 schildren = children = SYSCTL_CHILDREN(tree);
5193 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5194 children, COSFramesDroppedDueToFilters,
5195 "FramesDroppedDueToFilters");
5196 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5197 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5198 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5199 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5200 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5201 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5202 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5203 children, ifInDiscards, "InputDiscards");
5204 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5205 children, ifInErrors, "InputErrors");
5206 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5207 children, nicRecvThresholdHit, "RecvThresholdHit");
5208 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5209 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5210 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5211 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5212 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5213 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5214 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5215 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5216 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5217 children, nicRingStatusUpdate, "RingStatusUpdate");
5218 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5219 children, nicInterrupts, "Interrupts");
5220 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5221 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5222 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5223 children, nicSendThresholdHit, "SendThresholdHit");
5224
5225 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5226 NULL, "BGE RX Statistics");
5227 children = SYSCTL_CHILDREN(tree);
5228 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5229 children, rxstats.ifHCInOctets, "ifHCInOctets");
5230 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5231 children, rxstats.etherStatsFragments, "Fragments");
5232 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5233 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5234 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5235 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5236 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5237 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5238 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5239 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5240 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5241 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5242 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5243 children, rxstats.xoffPauseFramesReceived,
5244 "xoffPauseFramesReceived");
5245 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5246 children, rxstats.macControlFramesReceived,
5247 "ControlFramesReceived");
5248 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5249 children, rxstats.xoffStateEntered, "xoffStateEntered");
5250 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5251 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5252 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5253 children, rxstats.etherStatsJabbers, "Jabbers");
5254 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5255 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5256 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5257 children, rxstats.inRangeLengthError, "inRangeLengthError");
5258 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5259 children, rxstats.outRangeLengthError, "outRangeLengthError");
5260
5261 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5262 NULL, "BGE TX Statistics");
5263 children = SYSCTL_CHILDREN(tree);
5264 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5265 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5266 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5267 children, txstats.etherStatsCollisions, "Collisions");
5268 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5269 children, txstats.outXonSent, "XonSent");
5270 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5271 children, txstats.outXoffSent, "XoffSent");
5272 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5273 children, txstats.flowControlDone, "flowControlDone");
5274 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5275 children, txstats.dot3StatsInternalMacTransmitErrors,
5276 "InternalMacTransmitErrors");
5277 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5278 children, txstats.dot3StatsSingleCollisionFrames,
5279 "SingleCollisionFrames");
5280 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5281 children, txstats.dot3StatsMultipleCollisionFrames,
5282 "MultipleCollisionFrames");
5283 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5284 children, txstats.dot3StatsDeferredTransmissions,
5285 "DeferredTransmissions");
5286 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5287 children, txstats.dot3StatsExcessiveCollisions,
5288 "ExcessiveCollisions");
5289 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5290 children, txstats.dot3StatsLateCollisions,
5291 "LateCollisions");
5292 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5293 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5294 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5295 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5296 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5297 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5298 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5299 children, txstats.dot3StatsCarrierSenseErrors,
5300 "CarrierSenseErrors");
5301 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5302 children, txstats.ifOutDiscards, "Discards");
5303 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5304 children, txstats.ifOutErrors, "Errors");
5305}
5306
5307#undef BGE_SYSCTL_STAT
5308
5309#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5310 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5311
5312static void
5313bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5314 struct sysctl_oid_list *parent)
5315{
5316 struct sysctl_oid *tree;
5317 struct sysctl_oid_list *child, *schild;
5318 struct bge_mac_stats *stats;
5319
5320 stats = &sc->bge_mac_stats;
5321 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5322 NULL, "BGE Statistics");
5323 schild = child = SYSCTL_CHILDREN(tree);
5324 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5325 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5326 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5327 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5328 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5329 &stats->DmaWriteHighPriQueueFull,
5330 "NIC DMA Write High Priority Queue Full");
5331 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5332 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5333 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5334 &stats->InputDiscards, "Discarded Input Frames");
5335 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5336 &stats->InputErrors, "Input Errors");
5337 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5338 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5339
5340 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5341 NULL, "BGE RX Statistics");
5342 child = SYSCTL_CHILDREN(tree);
5343 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5344 &stats->ifHCInOctets, "Inbound Octets");
5345 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5346 &stats->etherStatsFragments, "Fragments");
5347 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5348 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5349 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5350 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5351 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5352 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5353 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5354 &stats->dot3StatsFCSErrors, "FCS Errors");
5355 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5356 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5357 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5358 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5359 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5360 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5361 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5362 &stats->macControlFramesReceived, "MAC Control Frames Received");
5363 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5364 &stats->xoffStateEntered, "XOFF State Entered");
5365 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5366 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5367 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5368 &stats->etherStatsJabbers, "Jabbers");
5369 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5370 &stats->etherStatsUndersizePkts, "Undersized Packets");
5371
5372 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5373 NULL, "BGE TX Statistics");
5374 child = SYSCTL_CHILDREN(tree);
5375 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5376 &stats->ifHCOutOctets, "Outbound Octets");
5377 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5378 &stats->etherStatsCollisions, "TX Collisions");
5379 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5380 &stats->outXonSent, "XON Sent");
5381 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5382 &stats->outXoffSent, "XOFF Sent");
5383 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5384 &stats->dot3StatsInternalMacTransmitErrors,
5385 "Internal MAC TX Errors");
5386 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5387 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5388 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5389 &stats->dot3StatsMultipleCollisionFrames,
5390 "Multiple Collision Frames");
5391 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5392 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5393 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5394 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5395 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5396 &stats->dot3StatsLateCollisions, "Late Collisions");
5397 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5398 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5399 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5400 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5401 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5402 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5403}
5404
5405#undef BGE_SYSCTL_STAT_ADD64
5406
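/*
 * Back end for the per-counter nodes created by BGE_SYSCTL_STAT: arg2 is
 * the offset of the counter within struct bge_stats (which mirrors the
 * NIC statistics block), and the value is read through the PCI memory
 * window.  Only a 32-bit slice of each counter (the bge_addr_lo word) is
 * reported.
 */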
5407static int
5408bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5409{
5410 struct bge_softc *sc;
5411 uint32_t result;
5412 int offset;
5413
5414 sc = (struct bge_softc *)arg1;
5415 offset = arg2;
5416 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5417 offsetof(bge_hostaddr, bge_addr_lo));
5418 return (sysctl_handle_int(oidp, &result, 0, req));
5419}
5420
5421#ifdef BGE_REGISTER_DEBUG
5422static int
5423bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5424{
5425 struct bge_softc *sc;
5426 uint16_t *sbdata;
5427 int error;
5428 int result;
5429 int i, j;
5430
5431 result = -1;
5432 error = sysctl_handle_int(oidp, &result, 0, req);
5433 if (error || (req->newptr == NULL))
5434 return (error);
5435
5436 if (result == 1) {
5437 sc = (struct bge_softc *)arg1;
5438
5439 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5440 printf("Status Block:\n");
5441 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
5442 printf("%06x:", i);
5443 for (j = 0; j < 8; j++) {
5444 printf(" %04x", sbdata[i]);
5445 i += 4;
5446 }
5447 printf("\n");
5448 }
5449
5450 printf("Registers:\n");
5451 for (i = 0x800; i < 0xA00; ) {
5452 printf("%06x:", i);
5453 for (j = 0; j < 8; j++) {
5454 printf(" %08x", CSR_READ_4(sc, i));
5455 i += 4;
5456 }
5457 printf("\n");
5458 }
5459
5460 printf("Hardware Flags:\n");
5461 if (BGE_IS_5755_PLUS(sc))
5462 printf(" - 5755 Plus\n");
5463 if (BGE_IS_575X_PLUS(sc))
5464 printf(" - 575X Plus\n");
5465 if (BGE_IS_5705_PLUS(sc))
5466 printf(" - 5705 Plus\n");
5467 if (BGE_IS_5714_FAMILY(sc))
5468 printf(" - 5714 Family\n");
5469 if (BGE_IS_5700_FAMILY(sc))
5470 printf(" - 5700 Family\n");
5471 if (sc->bge_flags & BGE_FLAG_JUMBO)
5472 printf(" - Supports Jumbo Frames\n");
5473 if (sc->bge_flags & BGE_FLAG_PCIX)
5474 printf(" - PCI-X Bus\n");
5475 if (sc->bge_flags & BGE_FLAG_PCIE)
5476 printf(" - PCI Express Bus\n");
5477 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5478 printf(" - No 3 LEDs\n");
5479 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5480 printf(" - RX Alignment Bug\n");
5481 }
5482
5483 return (error);
5484}
5485
5486static int
5487bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5488{
5489 struct bge_softc *sc;
5490 int error;
5491 uint16_t result;
5492 uint32_t val;
5493
5494 result = -1;
5495 error = sysctl_handle_int(oidp, &result, 0, req);
5496 if (error || (req->newptr == NULL))
5497 return (error);
5498
5499 if (result < 0x8000) {
5500 sc = (struct bge_softc *)arg1;
5501 val = CSR_READ_4(sc, result);
5502 printf("reg 0x%06X = 0x%08X\n", result, val);
5503 }
5504
5505 return (error);
5506}
5507
5508static int
5509bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5510{
5511 struct bge_softc *sc;
5512 int error;
5513 uint16_t result;
5514 uint32_t val;
5515
5516 result = -1;
5517 error = sysctl_handle_int(oidp, &result, 0, req);
5518 if (error || (req->newptr == NULL))
5519 return (error);
5520
5521 if (result < 0x8000) {
5522 sc = (struct bge_softc *)arg1;
5523 val = bge_readmem_ind(sc, result);
5524 printf("mem 0x%06X = 0x%08X\n", result, val);
5525 }
5526
5527 return (error);
5528}
5529#endif
5530
5531static int
5532bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5533{
5534
5535 if (sc->bge_flags & BGE_FLAG_EADDR)
5536 return (1);
5537
5538#ifdef __sparc64__
5539 OF_getetheraddr(sc->bge_dev, ether_addr);
5540 return (0);
5541#endif
5542 return (1);
5543}
5544
5545static int
5546bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5547{
5548 uint32_t mac_addr;
5549
5550 mac_addr = bge_readmem_ind(sc, 0x0c14);
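	/*
	 * Bootcode/firmware that has stored a valid station address in NIC
	 * memory marks the word at 0x0c14 with the ASCII signature 'H','K'
	 * (0x484b) in its upper 16 bits; without that signature the address
	 * bytes are not trusted.
	 */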
5551 if ((mac_addr >> 16) == 0x484b) {
5552 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5553 ether_addr[1] = (uint8_t)mac_addr;
5554 mac_addr = bge_readmem_ind(sc, 0x0c18);
5555 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5556 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5557 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5558 ether_addr[5] = (uint8_t)mac_addr;
5559 return (0);
5560 }
5561 return (1);
5562}
5563
5564static int
5565bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5566{
5567 int mac_offset = BGE_EE_MAC_OFFSET;
5568
5569 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5570 mac_offset = BGE_EE_MAC_OFFSET_5906;
5571
5572 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5573 ETHER_ADDR_LEN));
5574}
5575
5576static int
5577bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5578{
5579
5580 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5581 return (1);
5582
5583 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5584 ETHER_ADDR_LEN));
5585}
5586
5587static int
5588bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5589{
5590 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5591 /* NOTE: Order is critical */
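		/*
		 * That is: prefer an address supplied by firmware/Open
		 * Firmware, then one left in NIC memory by the bootcode,
		 * then NVRAM, then EEPROM.  The first getter returning 0
		 * wins (see the loop below).
		 */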
5592 bge_get_eaddr_fw,
5593 bge_get_eaddr_mem,
5594 bge_get_eaddr_nvram,
5595 bge_get_eaddr_eeprom,
5596 NULL
5597 };
5598 const bge_eaddr_fcn_t *func;
5599
5600 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5601 if ((*func)(sc, eaddr) == 0)
5602 break;
5603 }
5604 return (*func == NULL ? ENXIO : 0);
5605}
4105 tcp->th_sum = 0;
4106 /*
4107	 * Broadcom controllers use a different descriptor format for
4108	 * TSO depending on the ASIC revision. Due to the license issue of
4109	 * TSO-capable firmware and the lower performance of firmware-based
4110	 * TSO, we only support hardware-based TSO, which is available on
4111	 * BCM5755 or newer controllers. Hardware-based TSO uses 11 bits
4112	 * to store the MSS and the upper 5 bits to store the IP/TCP header
4113	 * length (including IP/TCP options). The header length is
4114	 * expressed in 32-bit units.
4115 */
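	/*
	 * For example, plain 20 byte IP and TCP headers give hlen =
	 * (20 + 20) / 4 = 10 32-bit words, so with a 1460 byte MSS the
	 * combined field becomes (10 << 11) | 1460 = 21940.
	 */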
4116 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4117 *mss |= (hlen << 11);
4118 return (m);
4119}
4120
4121/*
4122 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4123 * pointers to descriptors.
4124 */
4125static int
4126bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4127{
4128 bus_dma_segment_t segs[BGE_NSEG_NEW];
4129 bus_dmamap_t map;
4130 struct bge_tx_bd *d;
4131 struct mbuf *m = *m_head;
4132 uint32_t idx = *txidx;
4133 uint16_t csum_flags, mss, vlan_tag;
4134 int nsegs, i, error;
4135
4136 csum_flags = 0;
4137 mss = 0;
4138 vlan_tag = 0;
4139 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4140 *m_head = m = bge_setup_tso(sc, m, &mss);
4141 if (*m_head == NULL)
4142 return (ENOBUFS);
4143 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4144 BGE_TXBDFLAG_CPU_POST_DMA;
4145 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4146 if (m->m_pkthdr.csum_flags & CSUM_IP)
4147 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4148 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4149 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4150 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4151 (error = bge_cksum_pad(m)) != 0) {
4152 m_freem(m);
4153 *m_head = NULL;
4154 return (error);
4155 }
4156 }
4157 if (m->m_flags & M_LASTFRAG)
4158 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4159 else if (m->m_flags & M_FRAG)
4160 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4161 }
4162
4163 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
4164 sc->bge_forced_collapse > 0 &&
4165 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4166 /*
4167	 * Forcibly collapse mbuf chains to overcome a hardware
4168	 * limitation that only supports a single outstanding
4169	 * DMA read operation.
4170 */
4171 if (sc->bge_forced_collapse == 1)
4172 m = m_defrag(m, M_DONTWAIT);
4173 else
4174 m = m_collapse(m, M_DONTWAIT, sc->bge_forced_collapse);
4175 if (m == NULL)
4176 m = *m_head;
4177 *m_head = m;
4178 }
4179
4180 map = sc->bge_cdata.bge_tx_dmamap[idx];
4181 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4182 &nsegs, BUS_DMA_NOWAIT);
4183 if (error == EFBIG) {
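		/*
		 * The chain has more segments than the DMA tag allows;
		 * collapse it to at most BGE_NSEG_NEW segments and retry
		 * the load once before giving up.
		 */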
4184 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4185 if (m == NULL) {
4186 m_freem(*m_head);
4187 *m_head = NULL;
4188 return (ENOBUFS);
4189 }
4190 *m_head = m;
4191 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4192 m, segs, &nsegs, BUS_DMA_NOWAIT);
4193 if (error) {
4194 m_freem(m);
4195 *m_head = NULL;
4196 return (error);
4197 }
4198 } else if (error != 0)
4199 return (error);
4200
4201 /* Check if we have enough free send BDs. */
4202 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4203 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4204 return (ENOBUFS);
4205 }
4206
4207 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4208
4209#if __FreeBSD_version > 700022
4210 if (m->m_flags & M_VLANTAG) {
4211 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4212 vlan_tag = m->m_pkthdr.ether_vtag;
4213 }
4214#else
4215 {
4216 struct m_tag *mtag;
4217
4218 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
4219 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4220 vlan_tag = VLAN_TAG_VALUE(mtag);
4221 }
4222 }
4223#endif
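	/*
	 * Fill one send BD per DMA segment.  Every BD of the frame carries
	 * the same checksum/VLAN/MSS values; idx wraps modulo
	 * BGE_TX_RING_CNT and the last BD is flagged as end of packet below.
	 */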
4224 for (i = 0; ; i++) {
4225 d = &sc->bge_ldata.bge_tx_ring[idx];
4226 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4227 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4228 d->bge_len = segs[i].ds_len;
4229 d->bge_flags = csum_flags;
4230 d->bge_vlan_tag = vlan_tag;
4231 d->bge_mss = mss;
4232 if (i == nsegs - 1)
4233 break;
4234 BGE_INC(idx, BGE_TX_RING_CNT);
4235 }
4236
4237 /* Mark the last segment as end of packet... */
4238 d->bge_flags |= BGE_TXBDFLAG_END;
4239
4240 /*
4241	 * Ensure that the map for this transmission
4242 * is placed at the array index of the last descriptor
4243 * in this chain.
4244 */
4245 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4246 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4247 sc->bge_cdata.bge_tx_chain[idx] = m;
4248 sc->bge_txcnt += nsegs;
4249
4250 BGE_INC(idx, BGE_TX_RING_CNT);
4251 *txidx = idx;
4252
4253 return (0);
4254}
4255
4256/*
4257 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4258 * to the mbuf data regions directly in the transmit descriptors.
4259 */
4260static void
4261bge_start_locked(struct ifnet *ifp)
4262{
4263 struct bge_softc *sc;
4264 struct mbuf *m_head;
4265 uint32_t prodidx;
4266 int count;
4267
4268 sc = ifp->if_softc;
4269 BGE_LOCK_ASSERT(sc);
4270
4271 if (!sc->bge_link ||
4272 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4273 IFF_DRV_RUNNING)
4274 return;
4275
4276 prodidx = sc->bge_tx_prodidx;
4277
4278 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
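		/*
		 * Leave a little headroom (16 BDs) before attempting another
		 * encapsulation; if the ring is nearly full, set OACTIVE and
		 * wait for completed transmissions to free descriptors.
		 */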
4279 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4280 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4281 break;
4282 }
4283 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4284 if (m_head == NULL)
4285 break;
4286
4287 /*
4288 * XXX
4289 * The code inside the if() block is never reached since we
4290 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4291 * requests to checksum TCP/UDP in a fragmented packet.
4292 *
4293 * XXX
4294 * safety overkill. If this is a fragmented packet chain
4295 * with delayed TCP/UDP checksums, then only encapsulate
4296 * it if we have enough descriptors to handle the entire
4297 * chain at once.
4298 * (paranoia -- may not actually be needed)
4299 */
4300 if (m_head->m_flags & M_FIRSTFRAG &&
4301 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4302 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4303 m_head->m_pkthdr.csum_data + 16) {
4304 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4305 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4306 break;
4307 }
4308 }
4309
4310 /*
4311 * Pack the data into the transmit ring. If we
4312 * don't have room, set the OACTIVE flag and wait
4313 * for the NIC to drain the ring.
4314 */
4315 if (bge_encap(sc, &m_head, &prodidx)) {
4316 if (m_head == NULL)
4317 break;
4318 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4319 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4320 break;
4321 }
4322 ++count;
4323
4324 /*
4325 * If there's a BPF listener, bounce a copy of this frame
4326	 * to it.
4327 */
4328#ifdef ETHER_BPF_MTAP
4329 ETHER_BPF_MTAP(ifp, m_head);
4330#else
4331 BPF_MTAP(ifp, m_head);
4332#endif
4333 }
4334
4335 if (count > 0) {
4336 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4337 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4338 /* Transmit. */
4339 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4340 /* 5700 b2 errata */
4341 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4342 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4343
4344 sc->bge_tx_prodidx = prodidx;
4345
4346 /*
4347 * Set a timeout in case the chip goes out to lunch.
4348 */
4349 sc->bge_timer = 5;
4350 }
4351}
4352
4353/*
4354 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4355 * to the mbuf data regions directly in the transmit descriptors.
4356 */
4357static void
4358bge_start(struct ifnet *ifp)
4359{
4360 struct bge_softc *sc;
4361
4362 sc = ifp->if_softc;
4363 BGE_LOCK(sc);
4364 bge_start_locked(ifp);
4365 BGE_UNLOCK(sc);
4366}
4367
4368static void
4369bge_init_locked(struct bge_softc *sc)
4370{
4371 struct ifnet *ifp;
4372 uint16_t *m;
4373
4374 BGE_LOCK_ASSERT(sc);
4375
4376 ifp = sc->bge_ifp;
4377
4378 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4379 return;
4380
4381 /* Cancel pending I/O and flush buffers. */
4382 bge_stop(sc);
4383
4384 bge_stop_fw(sc);
4385 bge_sig_pre_reset(sc, BGE_RESET_START);
4386 bge_reset(sc);
4387 bge_sig_legacy(sc, BGE_RESET_START);
4388 bge_sig_post_reset(sc, BGE_RESET_START);
4389
4390 bge_chipinit(sc);
4391
4392 /*
4393 * Init the various state machines, ring
4394 * control blocks and firmware.
4395 */
4396 if (bge_blockinit(sc)) {
4397 device_printf(sc->bge_dev, "initialization failure\n");
4398 return;
4399 }
4400
4401 ifp = sc->bge_ifp;
4402
4403 /* Specify MTU. */
4404 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4405 ETHER_HDR_LEN + ETHER_CRC_LEN +
4406 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4407
4408 /* Load our MAC address. */
4409 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4410 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4411 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4412
4413 /* Program promiscuous mode. */
4414 bge_setpromisc(sc);
4415
4416 /* Program multicast filter. */
4417 bge_setmulti(sc);
4418
4419 /* Program VLAN tag stripping. */
4420 bge_setvlan(sc);
4421
4422 /* Override UDP checksum offloading. */
4423 if (sc->bge_forced_udpcsum == 0)
4424 sc->bge_csum_features &= ~CSUM_UDP;
4425 else
4426 sc->bge_csum_features |= CSUM_UDP;
4427 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4428 ifp->if_capenable & IFCAP_TXCSUM) {
4429 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4430 ifp->if_hwassist |= sc->bge_csum_features;
4431 }
4432
4433 /* Init RX ring. */
4434 if (bge_init_rx_ring_std(sc) != 0) {
4435 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4436 bge_stop(sc);
4437 return;
4438 }
4439
4440 /*
4441 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4442	 * memory to ensure that the chip has in fact read the first
4443 * entry of the ring.
4444 */
4445 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4446 uint32_t v, i;
4447 for (i = 0; i < 10; i++) {
4448 DELAY(20);
4449 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4450 if (v == (MCLBYTES - ETHER_ALIGN))
4451 break;
4452 }
4453 if (i == 10)
4454 device_printf (sc->bge_dev,
4455 "5705 A0 chip failed to load RX ring\n");
4456 }
4457
4458 /* Init jumbo RX ring. */
4459 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4460 (MCLBYTES - ETHER_ALIGN)) {
4461 if (bge_init_rx_ring_jumbo(sc) != 0) {
4462 device_printf(sc->bge_dev,
4463 "no memory for jumbo Rx buffers.\n");
4464 bge_stop(sc);
4465 return;
4466 }
4467 }
4468
4469 /* Init our RX return ring index. */
4470 sc->bge_rx_saved_considx = 0;
4471
4472 /* Init our RX/TX stat counters. */
4473 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4474
4475 /* Init TX ring. */
4476 bge_init_tx_ring(sc);
4477
4478 /* Turn on transmitter. */
4479 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
4480
4481 /* Turn on receiver. */
4482 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4483
4484 /*
4485 * Set the number of good frames to receive after RX MBUF
4486 * Low Watermark has been reached. After the RX MAC receives
4487 * this number of frames, it will drop subsequent incoming
4488 * frames until the MBUF High Watermark is reached.
4489 */
4490 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4491
4492 /* Clear MAC statistics. */
4493 if (BGE_IS_5705_PLUS(sc))
4494 bge_stats_clear_regs(sc);
4495
4496 /* Tell firmware we're alive. */
4497 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4498
4499#ifdef DEVICE_POLLING
4500 /* Disable interrupts if we are polling. */
4501 if (ifp->if_capenable & IFCAP_POLLING) {
4502 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4503 BGE_PCIMISCCTL_MASK_PCI_INTR);
4504 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4505 } else
4506#endif
4507
4508 /* Enable host interrupts. */
4509 {
4510 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4511 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4512 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4513 }
4514
4515 bge_ifmedia_upd_locked(ifp);
4516
4517 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4518 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4519
4520 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4521}
4522
4523static void
4524bge_init(void *xsc)
4525{
4526 struct bge_softc *sc = xsc;
4527
4528 BGE_LOCK(sc);
4529 bge_init_locked(sc);
4530 BGE_UNLOCK(sc);
4531}
4532
4533/*
4534 * Set media options.
4535 */
4536static int
4537bge_ifmedia_upd(struct ifnet *ifp)
4538{
4539 struct bge_softc *sc = ifp->if_softc;
4540 int res;
4541
4542 BGE_LOCK(sc);
4543 res = bge_ifmedia_upd_locked(ifp);
4544 BGE_UNLOCK(sc);
4545
4546 return (res);
4547}
4548
4549static int
4550bge_ifmedia_upd_locked(struct ifnet *ifp)
4551{
4552 struct bge_softc *sc = ifp->if_softc;
4553 struct mii_data *mii;
4554 struct mii_softc *miisc;
4555 struct ifmedia *ifm;
4556
4557 BGE_LOCK_ASSERT(sc);
4558
4559 ifm = &sc->bge_ifmedia;
4560
4561 /* If this is a 1000baseX NIC, enable the TBI port. */
4562 if (sc->bge_flags & BGE_FLAG_TBI) {
4563 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4564 return (EINVAL);
4565 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4566 case IFM_AUTO:
4567 /*
4568 * The BCM5704 ASIC appears to have a special
4569 * mechanism for programming the autoneg
4570 * advertisement registers in TBI mode.
4571 */
4572 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4573 uint32_t sgdig;
4574 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4575 if (sgdig & BGE_SGDIGSTS_DONE) {
4576 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4577 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4578 sgdig |= BGE_SGDIGCFG_AUTO |
4579 BGE_SGDIGCFG_PAUSE_CAP |
4580 BGE_SGDIGCFG_ASYM_PAUSE;
4581 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4582 sgdig | BGE_SGDIGCFG_SEND);
4583 DELAY(5);
4584 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4585 }
4586 }
4587 break;
4588 case IFM_1000_SX:
4589 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4590 BGE_CLRBIT(sc, BGE_MAC_MODE,
4591 BGE_MACMODE_HALF_DUPLEX);
4592 } else {
4593 BGE_SETBIT(sc, BGE_MAC_MODE,
4594 BGE_MACMODE_HALF_DUPLEX);
4595 }
4596 break;
4597 default:
4598 return (EINVAL);
4599 }
4600 return (0);
4601 }
4602
4603 sc->bge_link_evt++;
4604 mii = device_get_softc(sc->bge_miibus);
4605 if (mii->mii_instance)
4606 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4607 mii_phy_reset(miisc);
4608 mii_mediachg(mii);
4609
4610 /*
4611 * Force an interrupt so that we will call bge_link_upd
4612 * if needed and clear any pending link state attention.
4613 * Without this we are not getting any further interrupts
4614 * for link state changes and thus will not UP the link and
4615 * not be able to send in bge_start_locked. The only
4616 * way to get things working was to receive a packet and
4617 * get an RX intr.
4618 * bge_tick should help for fiber cards and we might not
4619 * need to do this here if BGE_FLAG_TBI is set but as
4620 * we poll for fiber anyway it should not harm.
4621 */
4622 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4623 sc->bge_flags & BGE_FLAG_5788)
4624 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4625 else
4626 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4627
4628 return (0);
4629}
4630
4631/*
4632 * Report current media status.
4633 */
4634static void
4635bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4636{
4637 struct bge_softc *sc = ifp->if_softc;
4638 struct mii_data *mii;
4639
4640 BGE_LOCK(sc);
4641
4642 if (sc->bge_flags & BGE_FLAG_TBI) {
4643 ifmr->ifm_status = IFM_AVALID;
4644 ifmr->ifm_active = IFM_ETHER;
4645 if (CSR_READ_4(sc, BGE_MAC_STS) &
4646 BGE_MACSTAT_TBI_PCS_SYNCHED)
4647 ifmr->ifm_status |= IFM_ACTIVE;
4648 else {
4649 ifmr->ifm_active |= IFM_NONE;
4650 BGE_UNLOCK(sc);
4651 return;
4652 }
4653 ifmr->ifm_active |= IFM_1000_SX;
4654 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4655 ifmr->ifm_active |= IFM_HDX;
4656 else
4657 ifmr->ifm_active |= IFM_FDX;
4658 BGE_UNLOCK(sc);
4659 return;
4660 }
4661
4662 mii = device_get_softc(sc->bge_miibus);
4663 mii_pollstat(mii);
4664 ifmr->ifm_active = mii->mii_media_active;
4665 ifmr->ifm_status = mii->mii_media_status;
4666
4667 BGE_UNLOCK(sc);
4668}
4669
4670static int
4671bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4672{
4673 struct bge_softc *sc = ifp->if_softc;
4674 struct ifreq *ifr = (struct ifreq *) data;
4675 struct mii_data *mii;
4676 int flags, mask, error = 0;
4677
4678 switch (command) {
4679 case SIOCSIFMTU:
4680 BGE_LOCK(sc);
4681 if (ifr->ifr_mtu < ETHERMIN ||
4682 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4683 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4684 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4685 ifr->ifr_mtu > ETHERMTU))
4686 error = EINVAL;
4687 else if (ifp->if_mtu != ifr->ifr_mtu) {
4688 ifp->if_mtu = ifr->ifr_mtu;
4689 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4690 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4691 bge_init_locked(sc);
4692 }
4693 }
4694 BGE_UNLOCK(sc);
4695 break;
4696 case SIOCSIFFLAGS:
4697 BGE_LOCK(sc);
4698 if (ifp->if_flags & IFF_UP) {
4699 /*
4700 * If only the state of the PROMISC flag changed,
4701 * then just use the 'set promisc mode' command
4702 * instead of reinitializing the entire NIC. Doing
4703 * a full re-init means reloading the firmware and
4704 * waiting for it to start up, which may take a
4705 * second or two. Similarly for ALLMULTI.
4706 */
4707 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4708 flags = ifp->if_flags ^ sc->bge_if_flags;
4709 if (flags & IFF_PROMISC)
4710 bge_setpromisc(sc);
4711 if (flags & IFF_ALLMULTI)
4712 bge_setmulti(sc);
4713 } else
4714 bge_init_locked(sc);
4715 } else {
4716 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4717 bge_stop(sc);
4718 }
4719 }
4720 sc->bge_if_flags = ifp->if_flags;
4721 BGE_UNLOCK(sc);
4722 error = 0;
4723 break;
4724 case SIOCADDMULTI:
4725 case SIOCDELMULTI:
4726 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4727 BGE_LOCK(sc);
4728 bge_setmulti(sc);
4729 BGE_UNLOCK(sc);
4730 error = 0;
4731 }
4732 break;
4733 case SIOCSIFMEDIA:
4734 case SIOCGIFMEDIA:
4735 if (sc->bge_flags & BGE_FLAG_TBI) {
4736 error = ifmedia_ioctl(ifp, ifr,
4737 &sc->bge_ifmedia, command);
4738 } else {
4739 mii = device_get_softc(sc->bge_miibus);
4740 error = ifmedia_ioctl(ifp, ifr,
4741 &mii->mii_media, command);
4742 }
4743 break;
4744 case SIOCSIFCAP:
4745 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4746#ifdef DEVICE_POLLING
4747 if (mask & IFCAP_POLLING) {
4748 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4749 error = ether_poll_register(bge_poll, ifp);
4750 if (error)
4751 return (error);
4752 BGE_LOCK(sc);
4753 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4754 BGE_PCIMISCCTL_MASK_PCI_INTR);
4755 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4756 ifp->if_capenable |= IFCAP_POLLING;
4757 BGE_UNLOCK(sc);
4758 } else {
4759 error = ether_poll_deregister(ifp);
4760 /* Enable interrupt even in error case */
4761 BGE_LOCK(sc);
4762 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
4763 BGE_PCIMISCCTL_MASK_PCI_INTR);
4764 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4765 ifp->if_capenable &= ~IFCAP_POLLING;
4766 BGE_UNLOCK(sc);
4767 }
4768 }
4769#endif
4770 if ((mask & IFCAP_TXCSUM) != 0 &&
4771 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
4772 ifp->if_capenable ^= IFCAP_TXCSUM;
4773 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
4774 ifp->if_hwassist |= sc->bge_csum_features;
4775 else
4776 ifp->if_hwassist &= ~sc->bge_csum_features;
4777 }
4778
4779 if ((mask & IFCAP_RXCSUM) != 0 &&
4780 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
4781 ifp->if_capenable ^= IFCAP_RXCSUM;
4782
4783 if ((mask & IFCAP_TSO4) != 0 &&
4784 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
4785 ifp->if_capenable ^= IFCAP_TSO4;
4786 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
4787 ifp->if_hwassist |= CSUM_TSO;
4788 else
4789 ifp->if_hwassist &= ~CSUM_TSO;
4790 }
4791
4792 if (mask & IFCAP_VLAN_MTU) {
4793 ifp->if_capenable ^= IFCAP_VLAN_MTU;
4794 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4795 bge_init(sc);
4796 }
4797
4798 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
4799 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
4800 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4801 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
4802 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
4803 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4804 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
4805 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
4806 BGE_LOCK(sc);
4807 bge_setvlan(sc);
4808 BGE_UNLOCK(sc);
4809 }
4810#ifdef VLAN_CAPABILITIES
4811 VLAN_CAPABILITIES(ifp);
4812#endif
4813 break;
4814 default:
4815 error = ether_ioctl(ifp, command, data);
4816 break;
4817 }
4818
4819 return (error);
4820}
4821
4822static void
4823bge_watchdog(struct bge_softc *sc)
4824{
4825 struct ifnet *ifp;
4826
4827 BGE_LOCK_ASSERT(sc);
4828
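	/*
	 * bge_timer == 0 means the watchdog is idle; otherwise count it
	 * down and only act once it reaches zero.
	 */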
4829 if (sc->bge_timer == 0 || --sc->bge_timer)
4830 return;
4831
4832 ifp = sc->bge_ifp;
4833
4834 if_printf(ifp, "watchdog timeout -- resetting\n");
4835
4836 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4837 bge_init_locked(sc);
4838
4839 ifp->if_oerrors++;
4840}
4841
4842/*
4843 * Stop the adapter and free any mbufs allocated to the
4844 * RX and TX lists.
4845 */
4846static void
4847bge_stop(struct bge_softc *sc)
4848{
4849 struct ifnet *ifp;
4850
4851 BGE_LOCK_ASSERT(sc);
4852
4853 ifp = sc->bge_ifp;
4854
4855 callout_stop(&sc->bge_stat_ch);
4856
4857 /* Disable host interrupts. */
4858 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4859 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4860
4861 /*
4862 * Tell firmware we're shutting down.
4863 */
4864 bge_stop_fw(sc);
4865 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4866
4867 /*
4868 * Disable all of the receiver blocks.
4869 */
4870 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4871 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4872 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4873 if (!(BGE_IS_5705_PLUS(sc)))
4874 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4875 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4876 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4877 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4878
4879 /*
4880 * Disable all of the transmit blocks.
4881 */
4882 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4883 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4884 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4885 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4886 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4887 if (!(BGE_IS_5705_PLUS(sc)))
4888 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4889 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4890
4891 /*
4892 * Shut down all of the memory managers and related
4893 * state machines.
4894 */
4895 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4896 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4897 if (!(BGE_IS_5705_PLUS(sc)))
4898 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4899 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4900 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4901 if (!(BGE_IS_5705_PLUS(sc))) {
4902 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4903 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4904 }
4905 /* Update MAC statistics. */
4906 if (BGE_IS_5705_PLUS(sc))
4907 bge_stats_update_regs(sc);
4908
4909 bge_reset(sc);
4910 bge_sig_legacy(sc, BGE_RESET_STOP);
4911 bge_sig_post_reset(sc, BGE_RESET_STOP);
4912
4913 /*
4914 * Keep the ASF firmware running if up.
4915 */
4916 if (sc->bge_asf_mode & ASF_STACKUP)
4917 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4918 else
4919 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4920
4921 /* Free the RX lists. */
4922 bge_free_rx_ring_std(sc);
4923
4924 /* Free jumbo RX list. */
4925 if (BGE_IS_JUMBO_CAPABLE(sc))
4926 bge_free_rx_ring_jumbo(sc);
4927
4928 /* Free TX buffers. */
4929 bge_free_tx_ring(sc);
4930
4931 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4932
4933 /* Clear MAC's link state (PHY may still have link UP). */
4934 if (bootverbose && sc->bge_link)
4935 if_printf(sc->bge_ifp, "link DOWN\n");
4936 sc->bge_link = 0;
4937
4938 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4939}
4940
4941/*
4942 * Stop all chip I/O so that the kernel's probe routines don't
4943 * get confused by errant DMAs when rebooting.
4944 */
4945static int
4946bge_shutdown(device_t dev)
4947{
4948 struct bge_softc *sc;
4949
4950 sc = device_get_softc(dev);
4951 BGE_LOCK(sc);
4952 bge_stop(sc);
4953 bge_reset(sc);
4954 BGE_UNLOCK(sc);
4955
4956 return (0);
4957}
4958
4959static int
4960bge_suspend(device_t dev)
4961{
4962 struct bge_softc *sc;
4963
4964 sc = device_get_softc(dev);
4965 BGE_LOCK(sc);
4966 bge_stop(sc);
4967 BGE_UNLOCK(sc);
4968
4969 return (0);
4970}
4971
4972static int
4973bge_resume(device_t dev)
4974{
4975 struct bge_softc *sc;
4976 struct ifnet *ifp;
4977
4978 sc = device_get_softc(dev);
4979 BGE_LOCK(sc);
4980 ifp = sc->bge_ifp;
4981 if (ifp->if_flags & IFF_UP) {
4982 bge_init_locked(sc);
4983 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4984 bge_start_locked(ifp);
4985 }
4986 BGE_UNLOCK(sc);
4987
4988 return (0);
4989}
4990
4991static void
4992bge_link_upd(struct bge_softc *sc)
4993{
4994 struct mii_data *mii;
4995 uint32_t link, status;
4996
4997 BGE_LOCK_ASSERT(sc);
4998
4999 /* Clear 'pending link event' flag. */
5000 sc->bge_link_evt = 0;
5001
5002 /*
5003 * Process link state changes.
5004 * Grrr. The link status word in the status block does
5005 * not work correctly on the BCM5700 rev AX and BX chips,
5006 * according to all available information. Hence, we have
5007 * to enable MII interrupts in order to properly obtain
5008 * async link changes. Unfortunately, this also means that
5009 * we have to read the MAC status register to detect link
5010 * changes, thereby adding an additional register access to
5011 * the interrupt handler.
5012 *
 5013	 * XXX: perhaps the link state detection procedure used for
 5014	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5015 */
5016
5017 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5018 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5019 status = CSR_READ_4(sc, BGE_MAC_STS);
5020 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5021 mii = device_get_softc(sc->bge_miibus);
5022 mii_pollstat(mii);
5023 if (!sc->bge_link &&
5024 mii->mii_media_status & IFM_ACTIVE &&
5025 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5026 sc->bge_link++;
5027 if (bootverbose)
5028 if_printf(sc->bge_ifp, "link UP\n");
5029 } else if (sc->bge_link &&
5030 (!(mii->mii_media_status & IFM_ACTIVE) ||
5031 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5032 sc->bge_link = 0;
5033 if (bootverbose)
5034 if_printf(sc->bge_ifp, "link DOWN\n");
5035 }
5036
5037 /* Clear the interrupt. */
5038 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5039 BGE_EVTENB_MI_INTERRUPT);
5040 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5041 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5042 BRGPHY_INTRS);
5043 }
5044 return;
5045 }
5046
5047 if (sc->bge_flags & BGE_FLAG_TBI) {
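		/*
		 * Fiber (TBI) interfaces have no MII PHY; link state is
		 * inferred from the MAC's PCS-synchronized status bit.
		 */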
5048 status = CSR_READ_4(sc, BGE_MAC_STS);
5049 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5050 if (!sc->bge_link) {
5051 sc->bge_link++;
5052 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5053 BGE_CLRBIT(sc, BGE_MAC_MODE,
5054 BGE_MACMODE_TBI_SEND_CFGS);
5055 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5056 if (bootverbose)
5057 if_printf(sc->bge_ifp, "link UP\n");
5058 if_link_state_change(sc->bge_ifp,
5059 LINK_STATE_UP);
5060 }
5061 } else if (sc->bge_link) {
5062 sc->bge_link = 0;
5063 if (bootverbose)
5064 if_printf(sc->bge_ifp, "link DOWN\n");
5065 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5066 }
5067 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5068 /*
 5069		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
 5070		 * bit in the status word always set. Work around this bug by
 5071		 * reading the PHY link status directly.
5072 */
5073 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5074
5075 if (link != sc->bge_link ||
5076 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5077 mii = device_get_softc(sc->bge_miibus);
5078 mii_pollstat(mii);
5079 if (!sc->bge_link &&
5080 mii->mii_media_status & IFM_ACTIVE &&
5081 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5082 sc->bge_link++;
5083 if (bootverbose)
5084 if_printf(sc->bge_ifp, "link UP\n");
5085 } else if (sc->bge_link &&
5086 (!(mii->mii_media_status & IFM_ACTIVE) ||
5087 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5088 sc->bge_link = 0;
5089 if (bootverbose)
5090 if_printf(sc->bge_ifp, "link DOWN\n");
5091 }
5092 }
5093 } else {
5094 /*
 5095		 * For controllers serviced by mii_tick(), we have to poll
 5096		 * the link status ourselves.
5097 */
5098 mii = device_get_softc(sc->bge_miibus);
5099 mii_pollstat(mii);
5100 bge_miibus_statchg(sc->bge_dev);
5101 }
5102
5103 /* Clear the attention. */
5104 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5105 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5106 BGE_MACSTAT_LINK_CHANGED);
5107}
5108
5109static void
5110bge_add_sysctls(struct bge_softc *sc)
5111{
5112 struct sysctl_ctx_list *ctx;
5113 struct sysctl_oid_list *children;
5114 char tn[32];
5115 int unit;
5116
5117 ctx = device_get_sysctl_ctx(sc->bge_dev);
5118 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5119
5120#ifdef BGE_REGISTER_DEBUG
5121 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5122 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5123 "Debug Information");
5124
5125 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5126 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5127 "Register Read");
5128
5129 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5130 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5131 "Memory Read");
5132
5133#endif
5134
5135 unit = device_get_unit(sc->bge_dev);
5136 /*
5137 * A common design characteristic for many Broadcom client controllers
5138 * is that they only support a single outstanding DMA read operation
5139 * on the PCIe bus. This means that it will take twice as long to fetch
5140 * a TX frame that is split into header and payload buffers as it does
5141 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5142 * these controllers, coalescing buffers to reduce the number of memory
 5143	 * reads is an effective way to get maximum performance (about 940Mbps).
 5144	 * Without collapsing TX buffers the maximum TCP bulk transfer
 5145	 * performance is about 850Mbps. However, forcibly coalescing mbufs
 5146	 * consumes a lot of CPU cycles, so leave it off by default.
5147 */
5148 sc->bge_forced_collapse = 0;
5149 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5150 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5151 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5152 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5153 "Number of fragmented TX buffers of a frame allowed before "
5154 "forced collapsing");
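	/*
	 * Illustrative sketch only, not part of this function: the TX path
	 * honors bge_forced_collapse roughly as follows, assuming the usual
	 * mbuf helpers; the actual logic lives in the transmit routine.
	 *
	 *	if (sc->bge_forced_collapse > 0 && m->m_next != NULL) {
	 *		if (sc->bge_forced_collapse == 1)
	 *			n = m_defrag(m, M_DONTWAIT);
	 *		else
	 *			n = m_collapse(m, M_DONTWAIT,
	 *			    sc->bge_forced_collapse);
	 *		if (n != NULL)
	 *			m = n;
	 *	}
	 */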
5155
5156 /*
5157 * It seems all Broadcom controllers have a bug that can generate UDP
5158 * datagrams with checksum value 0 when TX UDP checksum offloading is
 5159	 * enabled. Generating a UDP checksum value of 0 is an RFC 768 violation.
 5160	 * Even though the probability of generating such UDP datagrams is
 5161	 * low, I don't want FreeBSD boxes injecting such datagrams
 5162	 * into the network, so disable UDP checksum offloading by default. Users
 5163	 * can still override this behavior by setting a sysctl variable,
5164 * dev.bge.0.forced_udpcsum.
5165 */
5166 sc->bge_forced_udpcsum = 0;
 5167	snprintf(tn, sizeof(tn), "dev.bge.%d.forced_udpcsum", unit);
5168 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5169 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5170 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5171 "Enable UDP checksum offloading even if controller can "
5172 "generate UDP checksum value 0");
5173
5174 if (BGE_IS_5705_PLUS(sc))
5175 bge_add_sysctl_stats_regs(sc, ctx, children);
5176 else
5177 bge_add_sysctl_stats(sc, ctx, children);
5178}
5179
5180#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5181 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5182 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5183 desc)
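/*
 * Each OID created by BGE_SYSCTL_STAT passes the offset of its counter
 * within struct bge_stats to bge_sysctl_stats(), which reads the value
 * live from the chip's statistics block.
 */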
5184
5185static void
5186bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5187 struct sysctl_oid_list *parent)
5188{
5189 struct sysctl_oid *tree;
5190 struct sysctl_oid_list *children, *schildren;
5191
5192 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5193 NULL, "BGE Statistics");
5194 schildren = children = SYSCTL_CHILDREN(tree);
5195 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5196 children, COSFramesDroppedDueToFilters,
5197 "FramesDroppedDueToFilters");
5198 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5199 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5200 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5201 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5202 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5203 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5204 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5205 children, ifInDiscards, "InputDiscards");
5206 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5207 children, ifInErrors, "InputErrors");
5208 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5209 children, nicRecvThresholdHit, "RecvThresholdHit");
5210 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5211 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5212 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5213 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5214 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5215 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5216 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5217 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5218 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5219 children, nicRingStatusUpdate, "RingStatusUpdate");
5220 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5221 children, nicInterrupts, "Interrupts");
5222 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5223 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5224 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5225 children, nicSendThresholdHit, "SendThresholdHit");
5226
5227 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5228 NULL, "BGE RX Statistics");
5229 children = SYSCTL_CHILDREN(tree);
5230 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5231 children, rxstats.ifHCInOctets, "ifHCInOctets");
5232 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5233 children, rxstats.etherStatsFragments, "Fragments");
5234 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5235 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5236 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5237 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5238 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5239 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5240 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5241 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5242 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5243 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5244 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5245 children, rxstats.xoffPauseFramesReceived,
5246 "xoffPauseFramesReceived");
5247 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5248 children, rxstats.macControlFramesReceived,
5249 "ControlFramesReceived");
5250 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5251 children, rxstats.xoffStateEntered, "xoffStateEntered");
5252 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5253 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5254 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5255 children, rxstats.etherStatsJabbers, "Jabbers");
5256 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5257 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5258 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5259 children, rxstats.inRangeLengthError, "inRangeLengthError");
5260 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5261 children, rxstats.outRangeLengthError, "outRangeLengthError");
5262
5263 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5264 NULL, "BGE TX Statistics");
5265 children = SYSCTL_CHILDREN(tree);
5266 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5267 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5268 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5269 children, txstats.etherStatsCollisions, "Collisions");
5270 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5271 children, txstats.outXonSent, "XonSent");
5272 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5273 children, txstats.outXoffSent, "XoffSent");
5274 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5275 children, txstats.flowControlDone, "flowControlDone");
5276 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5277 children, txstats.dot3StatsInternalMacTransmitErrors,
5278 "InternalMacTransmitErrors");
5279 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5280 children, txstats.dot3StatsSingleCollisionFrames,
5281 "SingleCollisionFrames");
5282 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5283 children, txstats.dot3StatsMultipleCollisionFrames,
5284 "MultipleCollisionFrames");
5285 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5286 children, txstats.dot3StatsDeferredTransmissions,
5287 "DeferredTransmissions");
5288 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5289 children, txstats.dot3StatsExcessiveCollisions,
5290 "ExcessiveCollisions");
5291 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5292 children, txstats.dot3StatsLateCollisions,
5293 "LateCollisions");
5294 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5295 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5296 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5297 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5298 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5299 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5300 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5301 children, txstats.dot3StatsCarrierSenseErrors,
5302 "CarrierSenseErrors");
5303 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5304 children, txstats.ifOutDiscards, "Discards");
5305 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5306 children, txstats.ifOutErrors, "Errors");
5307}
5308
5309#undef BGE_SYSCTL_STAT
5310
5311#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5312 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
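/*
 * Unlike the BGE_SYSCTL_STAT OIDs above, these counters are not read from
 * the chip on demand; they are 64-bit values accumulated in software in
 * struct bge_mac_stats (see bge_stats_update_regs()) and exported directly.
 */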
5313
5314static void
5315bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5316 struct sysctl_oid_list *parent)
5317{
5318 struct sysctl_oid *tree;
5319 struct sysctl_oid_list *child, *schild;
5320 struct bge_mac_stats *stats;
5321
5322 stats = &sc->bge_mac_stats;
5323 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5324 NULL, "BGE Statistics");
5325 schild = child = SYSCTL_CHILDREN(tree);
5326 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5327 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5328 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5329 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5330 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5331 &stats->DmaWriteHighPriQueueFull,
5332 "NIC DMA Write High Priority Queue Full");
5333 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5334 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5335 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5336 &stats->InputDiscards, "Discarded Input Frames");
5337 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5338 &stats->InputErrors, "Input Errors");
5339 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5340 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5341
5342 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5343 NULL, "BGE RX Statistics");
5344 child = SYSCTL_CHILDREN(tree);
5345 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5346 &stats->ifHCInOctets, "Inbound Octets");
5347 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5348 &stats->etherStatsFragments, "Fragments");
5349 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5350 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5351 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5352 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5353 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5354 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5355 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5356 &stats->dot3StatsFCSErrors, "FCS Errors");
5357 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5358 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5359 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5360 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5361 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5362 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5363 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5364 &stats->macControlFramesReceived, "MAC Control Frames Received");
5365 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5366 &stats->xoffStateEntered, "XOFF State Entered");
5367 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5368 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5369 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5370 &stats->etherStatsJabbers, "Jabbers");
5371 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5372 &stats->etherStatsUndersizePkts, "Undersized Packets");
5373
5374 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5375 NULL, "BGE TX Statistics");
5376 child = SYSCTL_CHILDREN(tree);
5377 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5378 &stats->ifHCOutOctets, "Outbound Octets");
5379 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5380 &stats->etherStatsCollisions, "TX Collisions");
5381 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5382 &stats->outXonSent, "XON Sent");
5383 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5384 &stats->outXoffSent, "XOFF Sent");
5385 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5386 &stats->dot3StatsInternalMacTransmitErrors,
5387 "Internal MAC TX Errors");
5388 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5389 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5390 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5391 &stats->dot3StatsMultipleCollisionFrames,
5392 "Multiple Collision Frames");
5393 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5394 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5395 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5396 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5397 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5398 &stats->dot3StatsLateCollisions, "Late Collisions");
5399 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5400 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5401 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5402 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5403 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5404 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5405}
5406
5407#undef BGE_SYSCTL_STAT_ADD64
5408
5409static int
5410bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5411{
5412 struct bge_softc *sc;
5413 uint32_t result;
5414 int offset;
5415
5416 sc = (struct bge_softc *)arg1;
5417 offset = arg2;
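	/*
	 * Statistics counters live in NIC-local memory at BGE_STATS_BLOCK and
	 * are stored as 64-bit (hi/lo) values; fetch the low 32 bits of the
	 * requested counter through the PCI memory window.
	 */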
5418 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5419 offsetof(bge_hostaddr, bge_addr_lo));
5420 return (sysctl_handle_int(oidp, &result, 0, req));
5421}
5422
5423#ifdef BGE_REGISTER_DEBUG
5424static int
5425bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5426{
5427 struct bge_softc *sc;
5428 uint16_t *sbdata;
5429 int error;
5430 int result;
5431 int i, j;
5432
5433 result = -1;
5434 error = sysctl_handle_int(oidp, &result, 0, req);
5435 if (error || (req->newptr == NULL))
5436 return (error);
5437
5438 if (result == 1) {
5439 sc = (struct bge_softc *)arg1;
5440
5441 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5442 printf("Status Block:\n");
5443 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
5444 printf("%06x:", i);
5445 for (j = 0; j < 8; j++) {
5446 printf(" %04x", sbdata[i]);
5447 i += 4;
5448 }
5449 printf("\n");
5450 }
5451
5452 printf("Registers:\n");
5453 for (i = 0x800; i < 0xA00; ) {
5454 printf("%06x:", i);
5455 for (j = 0; j < 8; j++) {
5456 printf(" %08x", CSR_READ_4(sc, i));
5457 i += 4;
5458 }
5459 printf("\n");
5460 }
5461
5462 printf("Hardware Flags:\n");
5463 if (BGE_IS_5755_PLUS(sc))
5464 printf(" - 5755 Plus\n");
5465 if (BGE_IS_575X_PLUS(sc))
5466 printf(" - 575X Plus\n");
5467 if (BGE_IS_5705_PLUS(sc))
5468 printf(" - 5705 Plus\n");
5469 if (BGE_IS_5714_FAMILY(sc))
5470 printf(" - 5714 Family\n");
5471 if (BGE_IS_5700_FAMILY(sc))
5472 printf(" - 5700 Family\n");
5473 if (sc->bge_flags & BGE_FLAG_JUMBO)
5474 printf(" - Supports Jumbo Frames\n");
5475 if (sc->bge_flags & BGE_FLAG_PCIX)
5476 printf(" - PCI-X Bus\n");
5477 if (sc->bge_flags & BGE_FLAG_PCIE)
5478 printf(" - PCI Express Bus\n");
5479 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5480 printf(" - No 3 LEDs\n");
5481 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5482 printf(" - RX Alignment Bug\n");
5483 }
5484
5485 return (error);
5486}
5487
5488static int
5489bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5490{
5491 struct bge_softc *sc;
5492 int error;
5493 uint16_t result;
5494 uint32_t val;
5495
5496 result = -1;
5497 error = sysctl_handle_int(oidp, &result, 0, req);
5498 if (error || (req->newptr == NULL))
5499 return (error);
5500
5501 if (result < 0x8000) {
5502 sc = (struct bge_softc *)arg1;
5503 val = CSR_READ_4(sc, result);
5504 printf("reg 0x%06X = 0x%08X\n", result, val);
5505 }
5506
5507 return (error);
5508}
5509
5510static int
5511bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5512{
5513 struct bge_softc *sc;
5514 int error;
5515 uint16_t result;
5516 uint32_t val;
5517
5518 result = -1;
5519 error = sysctl_handle_int(oidp, &result, 0, req);
5520 if (error || (req->newptr == NULL))
5521 return (error);
5522
5523 if (result < 0x8000) {
5524 sc = (struct bge_softc *)arg1;
5525 val = bge_readmem_ind(sc, result);
5526 printf("mem 0x%06X = 0x%08X\n", result, val);
5527 }
5528
5529 return (error);
5530}
5531#endif
5532
5533static int
5534bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5535{
5536
5537 if (sc->bge_flags & BGE_FLAG_EADDR)
5538 return (1);
5539
5540#ifdef __sparc64__
5541 OF_getetheraddr(sc->bge_dev, ether_addr);
5542 return (0);
5543#endif
5544 return (1);
5545}
5546
5547static int
5548bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5549{
5550 uint32_t mac_addr;
5551
5552 mac_addr = bge_readmem_ind(sc, 0x0c14);
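	/*
	 * 0x484b is ASCII "HK", the signature the bootcode writes ahead of
	 * a valid MAC address in NIC memory.
	 */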
5553 if ((mac_addr >> 16) == 0x484b) {
5554 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5555 ether_addr[1] = (uint8_t)mac_addr;
5556 mac_addr = bge_readmem_ind(sc, 0x0c18);
5557 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5558 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5559 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5560 ether_addr[5] = (uint8_t)mac_addr;
5561 return (0);
5562 }
5563 return (1);
5564}
5565
5566static int
5567bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5568{
5569 int mac_offset = BGE_EE_MAC_OFFSET;
5570
5571 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5572 mac_offset = BGE_EE_MAC_OFFSET_5906;
5573
5574 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5575 ETHER_ADDR_LEN));
5576}
5577
5578static int
5579bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5580{
5581
5582 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5583 return (1);
5584
5585 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5586 ETHER_ADDR_LEN));
5587}
5588
5589static int
5590bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5591{
5592 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5593 /* NOTE: Order is critical */
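		/* Each method is tried in turn; the first that succeeds wins. */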
5594 bge_get_eaddr_fw,
5595 bge_get_eaddr_mem,
5596 bge_get_eaddr_nvram,
5597 bge_get_eaddr_eeprom,
5598 NULL
5599 };
5600 const bge_eaddr_fcn_t *func;
5601
5602 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5603 if ((*func)(sc, eaddr) == 0)
5604 break;
5605 }
5606 return (*func == NULL ? ENXIO : 0);
5607}