1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 213411 2010-10-04 18:09:01Z yongari $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
 136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
218
219 { SK_VENDORID, SK_DEVICEID_ALTIMA },
220
221 { TC_VENDORID, TC_DEVICEID_3C996 },
222
223 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
224 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
225 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
226
227 { 0, 0 }
228};
229
230static const struct bge_vendor {
231 uint16_t v_id;
232 const char *v_name;
233} bge_vendors[] = {
234 { ALTEON_VENDORID, "Alteon" },
235 { ALTIMA_VENDORID, "Altima" },
236 { APPLE_VENDORID, "Apple" },
237 { BCOM_VENDORID, "Broadcom" },
238 { SK_VENDORID, "SysKonnect" },
239 { TC_VENDORID, "3Com" },
240 { FJTSU_VENDORID, "Fujitsu" },
241
242 { 0, NULL }
243};
244
245static const struct bge_revision {
246 uint32_t br_chipid;
247 const char *br_name;
248} bge_revisions[] = {
249 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
250 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
251 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
252 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
253 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
254 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
255 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
256 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
257 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
258 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
259 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
260 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
261 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
262 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
263 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
264 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
265 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
266 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
267 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
268 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
269 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
270 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
271 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
272 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
273 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
274 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
275 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
276 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
277 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
278 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
279 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
280 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
281 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
282 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
283 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
284 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
285 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
286 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
287 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
288 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
289 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
290 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
291 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
292 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
293 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
294 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
295 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
296 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
297 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
298 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
299 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
300 /* 5754 and 5787 share the same ASIC ID */
301 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
302 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
303 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
304 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
305 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
306 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
307 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
308
309 { 0, NULL }
310};
311
312/*
313 * Some defaults for major revisions, so that newer steppings
314 * that we don't know about have a shot at working.
315 */
316static const struct bge_revision bge_majorrevs[] = {
317 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
318 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
319 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
320 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
321 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
322 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
323 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
324 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
325 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
326 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
327 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
328 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
329 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
330 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
331 /* 5754 and 5787 share the same ASIC ID */
332 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
333 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
334 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
335
336 { 0, NULL }
337};
338
339#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
340#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
341#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
342#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
343#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
344#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
345
346const struct bge_revision * bge_lookup_rev(uint32_t);
347const struct bge_vendor * bge_lookup_vendor(uint16_t);
348
349typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
350
351static int bge_probe(device_t);
352static int bge_attach(device_t);
353static int bge_detach(device_t);
354static int bge_suspend(device_t);
355static int bge_resume(device_t);
356static void bge_release_resources(struct bge_softc *);
357static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
358static int bge_dma_alloc(struct bge_softc *);
359static void bge_dma_free(struct bge_softc *);
360static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
361 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
362
363static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
364static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
365static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
366static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
367static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
368
369static void bge_txeof(struct bge_softc *, uint16_t);
370static int bge_rxeof(struct bge_softc *, uint16_t, int);
371
372static void bge_asf_driver_up (struct bge_softc *);
373static void bge_tick(void *);
374static void bge_stats_clear_regs(struct bge_softc *);
375static void bge_stats_update(struct bge_softc *);
376static void bge_stats_update_regs(struct bge_softc *);
377static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
378 uint16_t *);
379static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
380
381static void bge_intr(void *);
382static int bge_msi_intr(void *);
383static void bge_intr_task(void *, int);
384static void bge_start_locked(struct ifnet *);
385static void bge_start(struct ifnet *);
386static int bge_ioctl(struct ifnet *, u_long, caddr_t);
387static void bge_init_locked(struct bge_softc *);
388static void bge_init(void *);
389static void bge_stop(struct bge_softc *);
390static void bge_watchdog(struct bge_softc *);
391static int bge_shutdown(device_t);
392static int bge_ifmedia_upd_locked(struct ifnet *);
393static int bge_ifmedia_upd(struct ifnet *);
394static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
395
396static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
397static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
398
399static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
400static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
401
402static void bge_setpromisc(struct bge_softc *);
403static void bge_setmulti(struct bge_softc *);
404static void bge_setvlan(struct bge_softc *);
405
406static __inline void bge_rxreuse_std(struct bge_softc *, int);
407static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
408static int bge_newbuf_std(struct bge_softc *, int);
409static int bge_newbuf_jumbo(struct bge_softc *, int);
410static int bge_init_rx_ring_std(struct bge_softc *);
411static void bge_free_rx_ring_std(struct bge_softc *);
412static int bge_init_rx_ring_jumbo(struct bge_softc *);
413static void bge_free_rx_ring_jumbo(struct bge_softc *);
414static void bge_free_tx_ring(struct bge_softc *);
415static int bge_init_tx_ring(struct bge_softc *);
416
417static int bge_chipinit(struct bge_softc *);
418static int bge_blockinit(struct bge_softc *);
419
420static int bge_has_eaddr(struct bge_softc *);
421static uint32_t bge_readmem_ind(struct bge_softc *, int);
422static void bge_writemem_ind(struct bge_softc *, int, int);
423static void bge_writembx(struct bge_softc *, int, int);
424#ifdef notdef
425static uint32_t bge_readreg_ind(struct bge_softc *, int);
426#endif
427static void bge_writemem_direct(struct bge_softc *, int, int);
428static void bge_writereg_ind(struct bge_softc *, int, int);
429
430static int bge_miibus_readreg(device_t, int, int);
431static int bge_miibus_writereg(device_t, int, int, int);
432static void bge_miibus_statchg(device_t);
433#ifdef DEVICE_POLLING
434static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
435#endif
436
437#define BGE_RESET_START 1
438#define BGE_RESET_STOP 2
439static void bge_sig_post_reset(struct bge_softc *, int);
440static void bge_sig_legacy(struct bge_softc *, int);
441static void bge_sig_pre_reset(struct bge_softc *, int);
442static void bge_stop_fw(struct bge_softc *);
443static int bge_reset(struct bge_softc *);
444static void bge_link_upd(struct bge_softc *);
445
446/*
447 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
448 * leak information to untrusted users. It is also known to cause alignment
449 * traps on certain architectures.
450 */
451#ifdef BGE_REGISTER_DEBUG
452static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
453static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
454static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
455#endif
456static void bge_add_sysctls(struct bge_softc *);
457static void bge_add_sysctl_stats_regs(struct bge_softc *,
458 struct sysctl_ctx_list *, struct sysctl_oid_list *);
459static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
460 struct sysctl_oid_list *);
461static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
462
463static device_method_t bge_methods[] = {
464 /* Device interface */
465 DEVMETHOD(device_probe, bge_probe),
466 DEVMETHOD(device_attach, bge_attach),
467 DEVMETHOD(device_detach, bge_detach),
468 DEVMETHOD(device_shutdown, bge_shutdown),
469 DEVMETHOD(device_suspend, bge_suspend),
470 DEVMETHOD(device_resume, bge_resume),
471
472 /* bus interface */
473 DEVMETHOD(bus_print_child, bus_generic_print_child),
474 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
475
476 /* MII interface */
477 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
478 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
479 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
480
481 { 0, 0 }
482};
483
484static driver_t bge_driver = {
485 "bge",
486 bge_methods,
487 sizeof(struct bge_softc)
488};
489
490static devclass_t bge_devclass;
491
492DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
493DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
494
495static int bge_allow_asf = 1;
496
497TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
498
499SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
500SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
501 "Allow ASF mode if available");
502
503#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
504#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
505#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
506#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
507#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
508
509static int
510bge_has_eaddr(struct bge_softc *sc)
511{
512#ifdef __sparc64__
513 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
514 device_t dev;
515 uint32_t subvendor;
516
517 dev = sc->bge_dev;
518
519 /*
520 * The on-board BGEs found in sun4u machines aren't fitted with
521 * an EEPROM which means that we have to obtain the MAC address
522 * via OFW and that some tests will always fail. We distinguish
523 * such BGEs by the subvendor ID, which also has to be obtained
524 * from OFW instead of the PCI configuration space as the latter
525 * indicates Broadcom as the subvendor of the netboot interface.
526 * For early Blade 1500 and 2500 we even have to check the OFW
527 * device path as the subvendor ID always defaults to Broadcom
528 * there.
529 */
530 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
531 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
532 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
533 return (0);
534 memset(buf, 0, sizeof(buf));
535 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
536 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
537 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
538 return (0);
539 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
540 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
541 return (0);
542 }
543#endif
544 return (1);
545}
546
547static uint32_t
548bge_readmem_ind(struct bge_softc *sc, int off)
549{
550 device_t dev;
551 uint32_t val;
552
553 dev = sc->bge_dev;
554
555 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
556 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
557 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
558 return (val);
559}
560
561static void
562bge_writemem_ind(struct bge_softc *sc, int off, int val)
563{
564 device_t dev;
565
566 dev = sc->bge_dev;
567
568 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
569 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
570 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
571}
572
573#ifdef notdef
574static uint32_t
575bge_readreg_ind(struct bge_softc *sc, int off)
576{
577 device_t dev;
578
579 dev = sc->bge_dev;
580
581 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
582 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
583}
584#endif
585
586static void
587bge_writereg_ind(struct bge_softc *sc, int off, int val)
588{
589 device_t dev;
590
591 dev = sc->bge_dev;
592
593 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
594 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
595}
596
597static void
598bge_writemem_direct(struct bge_softc *sc, int off, int val)
599{
600 CSR_WRITE_4(sc, off, val);
601}
602
603static void
604bge_writembx(struct bge_softc *sc, int off, int val)
605{
606 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
607 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
608
609 CSR_WRITE_4(sc, off, val);
610}
611
612/*
613 * Map a single buffer address.
614 */
615
616static void
617bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
618{
619 struct bge_dmamap_arg *ctx;
620
621 if (error)
622 return;
623
624 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
625
626 ctx = arg;
627 ctx->bge_busaddr = segs->ds_addr;
628}
629
630static uint8_t
631bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
632{
633 uint32_t access, byte = 0;
634 int i;
635
636 /* Lock. */
637 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
638 for (i = 0; i < 8000; i++) {
639 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
640 break;
641 DELAY(20);
642 }
643 if (i == 8000)
644 return (1);
645
646 /* Enable access. */
647 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
648 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
649
650 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
651 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
652 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
653 DELAY(10);
654 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
655 DELAY(10);
656 break;
657 }
658 }
659
660 if (i == BGE_TIMEOUT * 10) {
661 if_printf(sc->bge_ifp, "nvram read timed out\n");
662 return (1);
663 }
664
665 /* Get result. */
666 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
667
668 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
669
670 /* Disable access. */
671 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
672
673 /* Unlock. */
674 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
675 CSR_READ_4(sc, BGE_NVRAM_SWARB);
676
677 return (0);
678}
679
680/*
681 * Read a sequence of bytes from NVRAM.
682 */
683static int
684bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
685{
686 int err = 0, i;
687 uint8_t byte = 0;
688
689 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
690 return (1);
691
692 for (i = 0; i < cnt; i++) {
693 err = bge_nvram_getbyte(sc, off + i, &byte);
694 if (err)
695 break;
696 *(dest + i) = byte;
697 }
698
699 return (err ? 1 : 0);
700}
701
702/*
703 * Read a byte of data stored in the EEPROM at address 'addr.' The
704 * BCM570x supports both the traditional bitbang interface and an
705 * auto access interface for reading the EEPROM. We use the auto
706 * access method.
707 */
708static uint8_t
709bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
710{
711 int i;
712 uint32_t byte = 0;
713
714 /*
715 * Enable use of auto EEPROM access so we can avoid
716 * having to use the bitbang method.
717 */
718 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
719
720 /* Reset the EEPROM, load the clock period. */
721 CSR_WRITE_4(sc, BGE_EE_ADDR,
722 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
723 DELAY(20);
724
725 /* Issue the read EEPROM command. */
726 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
727
728 /* Wait for completion */
 729 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
730 DELAY(10);
731 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
732 break;
733 }
734
735 if (i == BGE_TIMEOUT * 10) {
736 device_printf(sc->bge_dev, "EEPROM read timed out\n");
737 return (1);
738 }
739
740 /* Get result. */
741 byte = CSR_READ_4(sc, BGE_EE_DATA);
742
743 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
744
745 return (0);
746}
747
748/*
749 * Read a sequence of bytes from the EEPROM.
750 */
751static int
752bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
753{
754 int i, error = 0;
755 uint8_t byte = 0;
756
757 for (i = 0; i < cnt; i++) {
758 error = bge_eeprom_getbyte(sc, off + i, &byte);
759 if (error)
760 break;
761 *(dest + i) = byte;
762 }
763
764 return (error ? 1 : 0);
765}
766
767static int
768bge_miibus_readreg(device_t dev, int phy, int reg)
769{
770 struct bge_softc *sc;
771 uint32_t val, autopoll;
772 int i;
773
774 sc = device_get_softc(dev);
775
776 /*
777 * Broadcom's own driver always assumes the internal
778 * PHY is at GMII address 1. On some chips, the PHY responds
779 * to accesses at all addresses, which could cause us to
 780 * bogusly attach the PHY 32 times at probe time. Always
 781 * restricting the lookup to address 1 is simpler than
 782 * trying to figure out which chip revisions should be
783 * special-cased.
784 */
785 if (phy != 1)
786 return (0);
787
788 /* Reading with autopolling on may trigger PCI errors */
789 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
790 if (autopoll & BGE_MIMODE_AUTOPOLL) {
791 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
792 DELAY(40);
793 }
794
795 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
796 BGE_MIPHY(phy) | BGE_MIREG(reg));
797
798 for (i = 0; i < BGE_TIMEOUT; i++) {
799 DELAY(10);
800 val = CSR_READ_4(sc, BGE_MI_COMM);
801 if (!(val & BGE_MICOMM_BUSY))
802 break;
803 }
804
805 if (i == BGE_TIMEOUT) {
806 device_printf(sc->bge_dev,
807 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
808 phy, reg, val);
809 val = 0;
810 goto done;
811 }
812
813 DELAY(5);
814 val = CSR_READ_4(sc, BGE_MI_COMM);
815
816done:
817 if (autopoll & BGE_MIMODE_AUTOPOLL) {
818 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
819 DELAY(40);
820 }
821
822 if (val & BGE_MICOMM_READFAIL)
823 return (0);
824
825 return (val & 0xFFFF);
826}
827
828static int
829bge_miibus_writereg(device_t dev, int phy, int reg, int val)
830{
831 struct bge_softc *sc;
832 uint32_t autopoll;
833 int i;
834
835 sc = device_get_softc(dev);
836
837 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
838 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
839 return (0);
840
841 /* Reading with autopolling on may trigger PCI errors */
842 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
843 if (autopoll & BGE_MIMODE_AUTOPOLL) {
844 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
845 DELAY(40);
846 }
847
848 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
849 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
850
851 for (i = 0; i < BGE_TIMEOUT; i++) {
852 DELAY(10);
853 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
854 DELAY(5);
855 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
856 break;
857 }
858 }
859
860 if (i == BGE_TIMEOUT) {
861 device_printf(sc->bge_dev,
862 "PHY write timed out (phy %d, reg %d, val %d)\n",
863 phy, reg, val);
864 return (0);
865 }
866
867 if (autopoll & BGE_MIMODE_AUTOPOLL) {
868 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
869 DELAY(40);
870 }
871
872 return (0);
873}
874
875static void
876bge_miibus_statchg(device_t dev)
877{
878 struct bge_softc *sc;
879 struct mii_data *mii;
880 sc = device_get_softc(dev);
881 mii = device_get_softc(sc->bge_miibus);
882
883 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
884 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
885 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
886 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
887 else
888 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
889
 890 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
891 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
892 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FLAG1)
893 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
894 else
895 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
896 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FLAG0)
897 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
898 else
899 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
900 } else {
901 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
902 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
903 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
904 }
905}
906
907/*
 908 * Initialize a standard receive ring descriptor.
909 */
910static int
911bge_newbuf_std(struct bge_softc *sc, int i)
912{
913 struct mbuf *m;
914 struct bge_rx_bd *r;
915 bus_dma_segment_t segs[1];
916 bus_dmamap_t map;
917 int error, nsegs;
918
919 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
920 if (m == NULL)
921 return (ENOBUFS);
922 m->m_len = m->m_pkthdr.len = MCLBYTES;
923 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
924 m_adj(m, ETHER_ALIGN);
925
926 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
927 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
928 if (error != 0) {
929 m_freem(m);
930 return (error);
931 }
932 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
933 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
934 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
935 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
936 sc->bge_cdata.bge_rx_std_dmamap[i]);
937 }
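	/*
	 * Swap the spare map (which now maps the newly loaded mbuf) with
	 * this slot's map; the old map becomes the new spare, ready for
	 * the next replenish.
	 */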
938 map = sc->bge_cdata.bge_rx_std_dmamap[i];
939 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
940 sc->bge_cdata.bge_rx_std_sparemap = map;
941 sc->bge_cdata.bge_rx_std_chain[i] = m;
942 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
943 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
944 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
945 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
946 r->bge_flags = BGE_RXBDFLAG_END;
947 r->bge_len = segs[0].ds_len;
948 r->bge_idx = i;
949
950 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
951 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
952
953 return (0);
954}
955
956/*
957 * Initialize a jumbo receive ring descriptor. This allocates
958 * a jumbo buffer from the pool managed internally by the driver.
959 */
960static int
961bge_newbuf_jumbo(struct bge_softc *sc, int i)
962{
963 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
964 bus_dmamap_t map;
965 struct bge_extrx_bd *r;
966 struct mbuf *m;
967 int error, nsegs;
968
969 MGETHDR(m, M_DONTWAIT, MT_DATA);
970 if (m == NULL)
971 return (ENOBUFS);
972
973 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
974 if (!(m->m_flags & M_EXT)) {
975 m_freem(m);
976 return (ENOBUFS);
977 }
978 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
979 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
980 m_adj(m, ETHER_ALIGN);
981
982 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
983 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
984 if (error != 0) {
985 m_freem(m);
986 return (error);
987 }
988
 989 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
990 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
991 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
992 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
993 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
994 }
995 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
996 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
997 sc->bge_cdata.bge_rx_jumbo_sparemap;
998 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
999 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1000 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1001 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1002 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1003 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1004
1005 /*
1006 * Fill in the extended RX buffer descriptor.
1007 */
1008 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1009 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1010 r->bge_idx = i;
1011 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1012 switch (nsegs) {
1013 case 4:
1014 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1015 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1016 r->bge_len3 = segs[3].ds_len;
1017 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
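		/* FALLTHROUGH */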
1018 case 3:
1019 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1020 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1021 r->bge_len2 = segs[2].ds_len;
1022 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
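		/* FALLTHROUGH */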
1023 case 2:
1024 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1025 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1026 r->bge_len1 = segs[1].ds_len;
1027 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
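		/* FALLTHROUGH */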
1028 case 1:
1029 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1030 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1031 r->bge_len0 = segs[0].ds_len;
1032 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1033 break;
1034 default:
1035 panic("%s: %d segments\n", __func__, nsegs);
1036 }
1037
1038 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1039 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1040
1041 return (0);
1042}
1043
1044static int
1045bge_init_rx_ring_std(struct bge_softc *sc)
1046{
1047 int error, i;
1048
1049 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1050 sc->bge_std = 0;
1051 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1052 if ((error = bge_newbuf_std(sc, i)) != 0)
1053 return (error);
1054 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1055 }
1056
1057 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1058 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1059
1060 sc->bge_std = 0;
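	/* Advertise the fully stocked ring by bumping the producer index. */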
1061 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1062
1063 return (0);
1064}
1065
1066static void
1067bge_free_rx_ring_std(struct bge_softc *sc)
1068{
1069 int i;
1070
1071 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1072 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1073 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1074 sc->bge_cdata.bge_rx_std_dmamap[i],
1075 BUS_DMASYNC_POSTREAD);
1076 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1077 sc->bge_cdata.bge_rx_std_dmamap[i]);
1078 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1079 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1080 }
1081 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1082 sizeof(struct bge_rx_bd));
1083 }
1084}
1085
1086static int
1087bge_init_rx_ring_jumbo(struct bge_softc *sc)
1088{
1089 struct bge_rcb *rcb;
1090 int error, i;
1091
1092 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1093 sc->bge_jumbo = 0;
1094 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1095 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1096 return (error);
1097 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1098 }
1099
1100 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1101 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1102
1103 sc->bge_jumbo = 0;
1104
1105 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1106 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1107 BGE_RCB_FLAG_USE_EXT_RX_BD);
1108 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1109
1110 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1111
1112 return (0);
1113}
1114
1115static void
1116bge_free_rx_ring_jumbo(struct bge_softc *sc)
1117{
1118 int i;
1119
1120 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1121 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1122 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1123 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1124 BUS_DMASYNC_POSTREAD);
1125 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1126 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1127 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1128 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1129 }
1130 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1131 sizeof(struct bge_extrx_bd));
1132 }
1133}
1134
1135static void
1136bge_free_tx_ring(struct bge_softc *sc)
1137{
1138 int i;
1139
1140 if (sc->bge_ldata.bge_tx_ring == NULL)
1141 return;
1142
1143 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1144 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1145 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1146 sc->bge_cdata.bge_tx_dmamap[i],
1147 BUS_DMASYNC_POSTWRITE);
1148 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1149 sc->bge_cdata.bge_tx_dmamap[i]);
1150 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1151 sc->bge_cdata.bge_tx_chain[i] = NULL;
1152 }
1153 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1154 sizeof(struct bge_tx_bd));
1155 }
1156}
1157
1158static int
1159bge_init_tx_ring(struct bge_softc *sc)
1160{
1161 sc->bge_txcnt = 0;
1162 sc->bge_tx_saved_considx = 0;
1163
1164 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1165 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1166 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1167
1168 /* Initialize transmit producer index for host-memory send ring. */
1169 sc->bge_tx_prodidx = 0;
1170 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1171
1172 /* 5700 b2 errata */
1173 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1174 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1175
1176 /* NIC-memory send ring not used; initialize to zero. */
1177 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1178 /* 5700 b2 errata */
1179 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1180 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1181
1182 return (0);
1183}
1184
1185static void
1186bge_setpromisc(struct bge_softc *sc)
1187{
1188 struct ifnet *ifp;
1189
1190 BGE_LOCK_ASSERT(sc);
1191
1192 ifp = sc->bge_ifp;
1193
1194 /* Enable or disable promiscuous mode as needed. */
1195 if (ifp->if_flags & IFF_PROMISC)
1196 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1197 else
1198 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1199}
1200
1201static void
1202bge_setmulti(struct bge_softc *sc)
1203{
1204 struct ifnet *ifp;
1205 struct ifmultiaddr *ifma;
1206 uint32_t hashes[4] = { 0, 0, 0, 0 };
1207 int h, i;
1208
1209 BGE_LOCK_ASSERT(sc);
1210
1211 ifp = sc->bge_ifp;
1212
1213 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1214 for (i = 0; i < 4; i++)
1215 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1216 return;
1217 }
1218
1219 /* First, zot all the existing filters. */
1220 for (i = 0; i < 4; i++)
1221 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1222
1223 /* Now program new ones. */
1224 if_maddr_rlock(ifp);
1225 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1226 if (ifma->ifma_addr->sa_family != AF_LINK)
1227 continue;
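		/*
		 * The low 7 bits of the little-endian CRC of the address
		 * select one of 128 hash-table bits: bits 6-5 pick one of
		 * the four MAR registers, bits 4-0 the bit within it.
		 */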
1228 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1229 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1230 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1231 }
1232 if_maddr_runlock(ifp);
1233
1234 for (i = 0; i < 4; i++)
1235 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1236}
1237
1238static void
1239bge_setvlan(struct bge_softc *sc)
1240{
1241 struct ifnet *ifp;
1242
1243 BGE_LOCK_ASSERT(sc);
1244
1245 ifp = sc->bge_ifp;
1246
1247 /* Enable or disable VLAN tag stripping as needed. */
1248 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1249 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1250 else
1251 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1252}
1253
1254static void
1255bge_sig_pre_reset(struct bge_softc *sc, int type)
1256{
1257
1258 /*
 1259 * Some chips don't like this, so only do it if ASF is enabled.
1260 */
1261 if (sc->bge_asf_mode)
1262 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1263
1264 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1265 switch (type) {
1266 case BGE_RESET_START:
1267 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1268 break;
1269 case BGE_RESET_STOP:
1270 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1271 break;
1272 }
1273 }
1274}
1275
1276static void
1277bge_sig_post_reset(struct bge_softc *sc, int type)
1278{
1279
1280 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1281 switch (type) {
1282 case BGE_RESET_START:
1283 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1284 /* START DONE */
1285 break;
1286 case BGE_RESET_STOP:
1287 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1288 break;
1289 }
1290 }
1291}
1292
1293static void
1294bge_sig_legacy(struct bge_softc *sc, int type)
1295{
1296
1297 if (sc->bge_asf_mode) {
1298 switch (type) {
1299 case BGE_RESET_START:
1300 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1301 break;
1302 case BGE_RESET_STOP:
1303 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1304 break;
1305 }
1306 }
1307}
1308
1309static void
1310bge_stop_fw(struct bge_softc *sc)
1311{
1312 int i;
1313
1314 if (sc->bge_asf_mode) {
1315 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
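		/*
		 * Raise the driver-event bit (bit 14) in the CPU event
		 * register to signal the firmware, then wait below for
		 * the bit to be cleared.
		 */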
1316 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1317 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1318
1319 for (i = 0; i < 100; i++ ) {
1320 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1321 break;
1322 DELAY(10);
1323 }
1324 }
1325}
1326
1327/*
1328 * Do endian, PCI and DMA initialization.
1329 */
1330static int
1331bge_chipinit(struct bge_softc *sc)
1332{
1333 uint32_t dma_rw_ctl;
1334 uint16_t val;
1335 int i;
1336
1337 /* Set endianness before we access any non-PCI registers. */
1338 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1339
1340 /* Clear the MAC control register */
1341 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1342
1343 /*
1344 * Clear the MAC statistics block in the NIC's
1345 * internal memory.
1346 */
1347 for (i = BGE_STATS_BLOCK;
1348 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1349 BGE_MEMWIN_WRITE(sc, i, 0);
1350
1351 for (i = BGE_STATUS_BLOCK;
1352 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1353 BGE_MEMWIN_WRITE(sc, i, 0);
1354
1355 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1356 /*
1357 * Fix data corruption caused by non-qword write with WB.
1358 * Fix master abort in PCI mode.
1359 * Fix PCI latency timer.
1360 */
1361 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1362 val |= (1 << 10) | (1 << 12) | (1 << 13);
1363 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1364 }
1365
1366 /*
1367 * Set up the PCI DMA control register.
1368 */
1369 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1370 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1371 if (sc->bge_flags & BGE_FLAG_PCIE) {
1372 /* Read watermark not used, 128 bytes for write. */
1373 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1374 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1375 if (BGE_IS_5714_FAMILY(sc)) {
1376 /* 256 bytes for read and write. */
1377 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1378 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1379 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1380 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1381 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1382 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1383 /*
1384 * In the BCM5703, the DMA read watermark should
1385 * be set to less than or equal to the maximum
1386 * memory read byte count of the PCI-X command
1387 * register.
1388 */
1389 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1390 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1391 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1392 /* 1536 bytes for read, 384 bytes for write. */
1393 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1394 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1395 } else {
1396 /* 384 bytes for read and write. */
1397 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1398 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1399 0x0F;
1400 }
1401 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1402 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1403 uint32_t tmp;
1404
1405 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1406 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1407 if (tmp == 6 || tmp == 7)
1408 dma_rw_ctl |=
1409 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1410
1411 /* Set PCI-X DMA write workaround. */
1412 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1413 }
1414 } else {
1415 /* Conventional PCI bus: 256 bytes for read and write. */
1416 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1417 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1418
1419 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1420 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1421 dma_rw_ctl |= 0x0F;
1422 }
1423 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1424 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1425 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1426 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1427 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1428 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1429 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1430 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1431
1432 /*
1433 * Set up general mode register.
1434 */
1435 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1436 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1437 BGE_MODECTL_TX_NO_PHDR_CSUM);
1438
1439 /*
 1440 * BCM5701 B5 has a bug causing data corruption when using
1441 * 64-bit DMA reads, which can be terminated early and then
1442 * completed later as 32-bit accesses, in combination with
1443 * certain bridges.
1444 */
1445 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1446 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1447 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1448
1449 /*
1450 * Tell the firmware the driver is running
1451 */
1452 if (sc->bge_asf_mode & ASF_STACKUP)
1453 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1454
1455 /*
1456 * Disable memory write invalidate. Apparently it is not supported
1457 * properly by these devices. Also ensure that INTx isn't disabled,
1458 * as these chips need it even when using MSI.
1459 */
1460 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1461 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1462
 1463 /* Set the timer prescaler (always 66MHz) */
1464 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1465
1466 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1467 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1468 DELAY(40); /* XXX */
1469
1470 /* Put PHY into ready state */
1471 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1472 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1473 DELAY(40);
1474 }
1475
1476 return (0);
1477}
1478
1479static int
1480bge_blockinit(struct bge_softc *sc)
1481{
1482 struct bge_rcb *rcb;
1483 bus_size_t vrcb;
1484 bge_hostaddr taddr;
1485 uint32_t val;
1486 int i;
1487
1488 /*
1489 * Initialize the memory window pointer register so that
1490 * we can access the first 32K of internal NIC RAM. This will
1491 * allow us to set up the TX send ring RCBs and the RX return
1492 * ring RCBs, plus other things which live in NIC memory.
1493 */
1494 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1495
1496 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1497
1498 if (!(BGE_IS_5705_PLUS(sc))) {
1499 /* Configure mbuf memory pool */
1500 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1501 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1502 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1503 else
1504 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1505
1506 /* Configure DMA resource pool */
1507 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1508 BGE_DMA_DESCRIPTORS);
1509 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1510 }
1511
1512 /* Configure mbuf pool watermarks */
1513 if (!BGE_IS_5705_PLUS(sc)) {
1514 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1515 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1516 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1517 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1518 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1519 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1520 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1521 } else {
1522 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1523 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1524 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1525 }
1526
1527 /* Configure DMA resource watermarks */
1528 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1529 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1530
1531 /* Enable buffer manager */
1532 if (!(BGE_IS_5705_PLUS(sc))) {
1533 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1534 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1535
1536 /* Poll for buffer manager start indication */
1537 for (i = 0; i < BGE_TIMEOUT; i++) {
1538 DELAY(10);
1539 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1540 break;
1541 }
1542
1543 if (i == BGE_TIMEOUT) {
1544 device_printf(sc->bge_dev,
1545 "buffer manager failed to start\n");
1546 return (ENXIO);
1547 }
1548 }
1549
1550 /* Enable flow-through queues */
1551 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1552 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1553
1554 /* Wait until queue initialization is complete */
1555 for (i = 0; i < BGE_TIMEOUT; i++) {
1556 DELAY(10);
1557 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1558 break;
1559 }
1560
1561 if (i == BGE_TIMEOUT) {
1562 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1563 return (ENXIO);
1564 }
1565
1566 /* Initialize the standard RX ring control block */
1567 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1568 rcb->bge_hostaddr.bge_addr_lo =
1569 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1570 rcb->bge_hostaddr.bge_addr_hi =
1571 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1572 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1573 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1574 if (BGE_IS_5705_PLUS(sc))
1575 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1576 else
1577 rcb->bge_maxlen_flags =
1578 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1579 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1580 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1581 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1582
1583 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1584 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1585
1586 /*
1587 * Initialize the jumbo RX ring control block
1588 * We set the 'ring disabled' bit in the flags
1589 * field until we're actually ready to start
1590 * using this ring (i.e. once we set the MTU
1591 * high enough to require it).
1592 */
1593 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1594 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1595
1596 rcb->bge_hostaddr.bge_addr_lo =
1597 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1598 rcb->bge_hostaddr.bge_addr_hi =
1599 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1600 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1601 sc->bge_cdata.bge_rx_jumbo_ring_map,
1602 BUS_DMASYNC_PREREAD);
1603 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1604 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1605 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1606 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1607 rcb->bge_hostaddr.bge_addr_hi);
1608 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1609 rcb->bge_hostaddr.bge_addr_lo);
1610
1611 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1612 rcb->bge_maxlen_flags);
1613 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1614
1615 /* Set up dummy disabled mini ring RCB */
1616 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1617 rcb->bge_maxlen_flags =
1618 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1619 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1620 rcb->bge_maxlen_flags);
1621 }
1622
1623 /*
 1624 * Set the BD ring replenish thresholds. The recommended
1625 * values are 1/8th the number of descriptors allocated to
1626 * each ring.
1627 * XXX The 5754 requires a lower threshold, so it might be a
1628 * requirement of all 575x family chips. The Linux driver sets
1629 * the lower threshold for all 5705 family chips as well, but there
1630 * are reports that it might not need to be so strict.
1631 *
1632 * XXX Linux does some extra fiddling here for the 5906 parts as
1633 * well.
1634 */
1635 if (BGE_IS_5705_PLUS(sc))
1636 val = 8;
1637 else
1638 val = BGE_STD_RX_RING_CNT / 8;
1639 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1640 if (BGE_IS_JUMBO_CAPABLE(sc))
1641 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1642 BGE_JUMBO_RX_RING_CNT/8);
1643
1644 /*
1645 * Disable all unused send rings by setting the 'ring disabled'
1646 * bit in the flags field of all the TX send ring control blocks.
1647 * These are located in NIC memory.
1648 */
1649 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1650 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1651 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1652 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1653 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1654 vrcb += sizeof(struct bge_rcb);
1655 }
1656
1657 /* Configure TX RCB 0 (we use only the first ring) */
1658 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1659 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1660 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1661 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1662 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1663 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1664 if (!(BGE_IS_5705_PLUS(sc)))
1665 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1666 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1667
1668 /* Disable all unused RX return rings */
1669 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1670 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1671 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1672 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1673 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1674 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1675 BGE_RCB_FLAG_RING_DISABLED));
1676 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1677 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1678 (i * (sizeof(uint64_t))), 0);
1679 vrcb += sizeof(struct bge_rcb);
1680 }
1681
1682 /* Initialize RX ring indexes */
1683 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1684 if (BGE_IS_JUMBO_CAPABLE(sc))
1685 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1686 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1687 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1688
1689 /*
1690 * Set up RX return ring 0
1691 * Note that the NIC address for RX return rings is 0x00000000.
1692 * The return rings live entirely within the host, so the
1693 * nicaddr field in the RCB isn't used.
1694 */
1695 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1696 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1697 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1698 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1699 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1700 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1701 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1702
1703 /* Set random backoff seed for TX */
1704 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1705 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1706 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1707 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1708 BGE_TX_BACKOFF_SEED_MASK);
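
	/*
	 * For example, a station address of 00:10:18:01:02:03 seeds the
	 * backoff generator with 0x00 + 0x10 + 0x18 + 0x01 + 0x02 + 0x03
	 * (plus BGE_TX_BACKOFF_SEED_MASK, as written above), so NICs with
	 * different MAC addresses are unlikely to pick the same collision
	 * backoff slots. The chip presumably keeps only the low-order
	 * seed bits.
	 */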
1709
1710 /* Set inter-packet gap */
1711 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1712
1713 /*
1714 * Specify which ring to use for packets that don't match
1715 * any RX rules.
1716 */
1717 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1718
1719 /*
1720 * Configure number of RX lists. One interrupt distribution
1721 * list, eight active lists, one bad frames class.
1722 */
1723 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1724
1725 /* Initialize RX list placement stats mask. */
1726 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1727 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1728
1729 /* Disable host coalescing until we get it set up */
1730 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1731
1732 /* Poll to make sure it's shut down. */
1733 for (i = 0; i < BGE_TIMEOUT; i++) {
1734 DELAY(10);
1735 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1736 break;
1737 }
1738
1739 if (i == BGE_TIMEOUT) {
1740 device_printf(sc->bge_dev,
1741 "host coalescing engine failed to idle\n");
1742 return (ENXIO);
1743 }
1744
1745 /* Set up host coalescing defaults */
1746 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1747 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1748 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1749 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1750 if (!(BGE_IS_5705_PLUS(sc))) {
1751 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1752 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1753 }
1754 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1755 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
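
	/*
	 * A rough reading of these settings: the coalescing engine delays
	 * the status block update / interrupt until either the tick
	 * counter expires or *_MAX_COAL_BDS completed descriptors have
	 * accumulated, whichever happens first; the *_INT variants above
	 * apply while an interrupt is already being asserted, and setting
	 * them to 1 keeps the status block current during servicing.
	 */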
1756
1757 /* Set up address of statistics block */
1758 if (!(BGE_IS_5705_PLUS(sc))) {
1759 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1760 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1761 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1762 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1763 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1764 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1765 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1766 }
1767
1768 /* Set up address of status block */
1769 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1770 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1771 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1772 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1773
1774 /* Set up status block size. */
1775 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1776 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1777 val = BGE_STATBLKSZ_FULL;
1778 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1779 } else {
1780 val = BGE_STATBLKSZ_32BYTE;
1781 bzero(sc->bge_ldata.bge_status_block, 32);
1782 }
1783 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1784 sc->bge_cdata.bge_status_map,
1785 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1786
1787 /* Turn on host coalescing state machine */
1788 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1789
1790 /* Turn on RX BD completion state machine and enable attentions */
1791 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1792 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1793
1794 /* Turn on RX list placement state machine */
1795 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1796
1797 /* Turn on RX list selector state machine. */
1798 if (!(BGE_IS_5705_PLUS(sc)))
1799 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1800
1801 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1802 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1803 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1804 BGE_MACMODE_FRMHDR_DMA_ENB;
1805
1806 if (sc->bge_flags & BGE_FLAG_TBI)
1807 val |= BGE_PORTMODE_TBI;
1808 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1809 val |= BGE_PORTMODE_GMII;
1810 else
1811 val |= BGE_PORTMODE_MII;
1812
1813 /* Turn on DMA, clear stats */
1814 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1815
1816 /* Set misc. local control, enable interrupts on attentions */
1817 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1818
1819#ifdef notdef
1820 /* Assert GPIO pins for PHY reset */
1821 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1822 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1823 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1824 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1825#endif
1826
1827 /* Turn on DMA completion state machine */
1828 if (!(BGE_IS_5705_PLUS(sc)))
1829 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1830
1831 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1832
1833 /* Enable host coalescing bug fix. */
1834 if (BGE_IS_5755_PLUS(sc))
1835 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1836
1837 /* Request larger DMA burst size to get better performance. */
1838 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
1839 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1840
1841 /* Turn on write DMA state machine */
1842 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1843 DELAY(40);
1844
1845 /* Turn on read DMA state machine */
1846 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1847 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1848 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1849 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1850 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1851 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1852 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1853 if (sc->bge_flags & BGE_FLAG_PCIE)
1854 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1855 if (sc->bge_flags & BGE_FLAG_TSO) {
1856 val |= BGE_RDMAMODE_TSO4_ENABLE;
1857 if (sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1858 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1859 val |= BGE_RDMAMODE_TSO6_ENABLE;
1860 }
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
218
219 { SK_VENDORID, SK_DEVICEID_ALTIMA },
220
221 { TC_VENDORID, TC_DEVICEID_3C996 },
222
223 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
224 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
225 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
226
227 { 0, 0 }
228};
229
230static const struct bge_vendor {
231 uint16_t v_id;
232 const char *v_name;
233} bge_vendors[] = {
234 { ALTEON_VENDORID, "Alteon" },
235 { ALTIMA_VENDORID, "Altima" },
236 { APPLE_VENDORID, "Apple" },
237 { BCOM_VENDORID, "Broadcom" },
238 { SK_VENDORID, "SysKonnect" },
239 { TC_VENDORID, "3Com" },
240 { FJTSU_VENDORID, "Fujitsu" },
241
242 { 0, NULL }
243};
244
245static const struct bge_revision {
246 uint32_t br_chipid;
247 const char *br_name;
248} bge_revisions[] = {
249 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
250 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
251 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
252 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
253 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
254 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
255 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
256 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
257 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
258 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
259 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
260 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
261 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
262 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
263 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
264 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
265 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
266 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
267 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
268 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
269 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
270 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
271 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
272 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
273 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
274 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
275 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
276 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
277 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
278 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
279 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
280 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
281 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
282 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
283 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
284 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
285 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
286 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
287 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
288 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
289 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
290 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
291 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
292 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
293 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
294 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
295 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
296 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
297 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
298 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
299 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
300 /* 5754 and 5787 share the same ASIC ID */
301 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
302 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
303 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
304 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
305 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
306 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
307 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
308
309 { 0, NULL }
310};
311
312/*
313 * Some defaults for major revisions, so that newer steppings
314 * that we don't know about have a shot at working.
315 */
316static const struct bge_revision bge_majorrevs[] = {
317 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
318 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
319 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
320 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
321 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
322 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
323 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
324 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
325 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
326 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
327 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
328 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
329 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
330 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
331 /* 5754 and 5787 share the same ASIC ID */
332 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
333 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
334 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
335
336 { 0, NULL }
337};
338
339#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
340#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
341#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
342#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
343#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
344#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
345
346const struct bge_revision * bge_lookup_rev(uint32_t);
347const struct bge_vendor * bge_lookup_vendor(uint16_t);
348
349typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
350
351static int bge_probe(device_t);
352static int bge_attach(device_t);
353static int bge_detach(device_t);
354static int bge_suspend(device_t);
355static int bge_resume(device_t);
356static void bge_release_resources(struct bge_softc *);
357static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
358static int bge_dma_alloc(struct bge_softc *);
359static void bge_dma_free(struct bge_softc *);
360static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
361 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
362
363static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
364static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
365static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
366static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
367static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
368
369static void bge_txeof(struct bge_softc *, uint16_t);
370static int bge_rxeof(struct bge_softc *, uint16_t, int);
371
372static void bge_asf_driver_up (struct bge_softc *);
373static void bge_tick(void *);
374static void bge_stats_clear_regs(struct bge_softc *);
375static void bge_stats_update(struct bge_softc *);
376static void bge_stats_update_regs(struct bge_softc *);
377static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
378 uint16_t *);
379static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
380
381static void bge_intr(void *);
382static int bge_msi_intr(void *);
383static void bge_intr_task(void *, int);
384static void bge_start_locked(struct ifnet *);
385static void bge_start(struct ifnet *);
386static int bge_ioctl(struct ifnet *, u_long, caddr_t);
387static void bge_init_locked(struct bge_softc *);
388static void bge_init(void *);
389static void bge_stop(struct bge_softc *);
390static void bge_watchdog(struct bge_softc *);
391static int bge_shutdown(device_t);
392static int bge_ifmedia_upd_locked(struct ifnet *);
393static int bge_ifmedia_upd(struct ifnet *);
394static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
395
396static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
397static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
398
399static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
400static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
401
402static void bge_setpromisc(struct bge_softc *);
403static void bge_setmulti(struct bge_softc *);
404static void bge_setvlan(struct bge_softc *);
405
406static __inline void bge_rxreuse_std(struct bge_softc *, int);
407static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
408static int bge_newbuf_std(struct bge_softc *, int);
409static int bge_newbuf_jumbo(struct bge_softc *, int);
410static int bge_init_rx_ring_std(struct bge_softc *);
411static void bge_free_rx_ring_std(struct bge_softc *);
412static int bge_init_rx_ring_jumbo(struct bge_softc *);
413static void bge_free_rx_ring_jumbo(struct bge_softc *);
414static void bge_free_tx_ring(struct bge_softc *);
415static int bge_init_tx_ring(struct bge_softc *);
416
417static int bge_chipinit(struct bge_softc *);
418static int bge_blockinit(struct bge_softc *);
419
420static int bge_has_eaddr(struct bge_softc *);
421static uint32_t bge_readmem_ind(struct bge_softc *, int);
422static void bge_writemem_ind(struct bge_softc *, int, int);
423static void bge_writembx(struct bge_softc *, int, int);
424#ifdef notdef
425static uint32_t bge_readreg_ind(struct bge_softc *, int);
426#endif
427static void bge_writemem_direct(struct bge_softc *, int, int);
428static void bge_writereg_ind(struct bge_softc *, int, int);
429
430static int bge_miibus_readreg(device_t, int, int);
431static int bge_miibus_writereg(device_t, int, int, int);
432static void bge_miibus_statchg(device_t);
433#ifdef DEVICE_POLLING
434static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
435#endif
436
437#define BGE_RESET_START 1
438#define BGE_RESET_STOP 2
439static void bge_sig_post_reset(struct bge_softc *, int);
440static void bge_sig_legacy(struct bge_softc *, int);
441static void bge_sig_pre_reset(struct bge_softc *, int);
442static void bge_stop_fw(struct bge_softc *);
443static int bge_reset(struct bge_softc *);
444static void bge_link_upd(struct bge_softc *);
445
446/*
447 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
448 * leak information to untrusted users. It is also known to cause alignment
449 * traps on certain architectures.
450 */
451#ifdef BGE_REGISTER_DEBUG
452static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
453static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
454static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
455#endif
456static void bge_add_sysctls(struct bge_softc *);
457static void bge_add_sysctl_stats_regs(struct bge_softc *,
458 struct sysctl_ctx_list *, struct sysctl_oid_list *);
459static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
460 struct sysctl_oid_list *);
461static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
462
463static device_method_t bge_methods[] = {
464 /* Device interface */
465 DEVMETHOD(device_probe, bge_probe),
466 DEVMETHOD(device_attach, bge_attach),
467 DEVMETHOD(device_detach, bge_detach),
468 DEVMETHOD(device_shutdown, bge_shutdown),
469 DEVMETHOD(device_suspend, bge_suspend),
470 DEVMETHOD(device_resume, bge_resume),
471
472 /* bus interface */
473 DEVMETHOD(bus_print_child, bus_generic_print_child),
474 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
475
476 /* MII interface */
477 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
478 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
479 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
480
481 { 0, 0 }
482};
483
484static driver_t bge_driver = {
485 "bge",
486 bge_methods,
487 sizeof(struct bge_softc)
488};
489
490static devclass_t bge_devclass;
491
492DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
493DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
494
495static int bge_allow_asf = 1;
496
497TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
498
499SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
500SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
501 "Allow ASF mode if available");
502
503#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
504#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
505#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
506#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
507#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
508
509static int
510bge_has_eaddr(struct bge_softc *sc)
511{
512#ifdef __sparc64__
513 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
514 device_t dev;
515 uint32_t subvendor;
516
517 dev = sc->bge_dev;
518
519 /*
520 * The on-board BGEs found in sun4u machines aren't fitted with
521 * an EEPROM which means that we have to obtain the MAC address
522 * via OFW and that some tests will always fail. We distinguish
523 * such BGEs by the subvendor ID, which also has to be obtained
524 * from OFW instead of the PCI configuration space as the latter
525 * indicates Broadcom as the subvendor of the netboot interface.
526 * For early Blade 1500 and 2500 we even have to check the OFW
527 * device path as the subvendor ID always defaults to Broadcom
528 * there.
529 */
530 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
531 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
532 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
533 return (0);
534 memset(buf, 0, sizeof(buf));
535 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
536 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
537 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
538 return (0);
539 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
540 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
541 return (0);
542 }
543#endif
544 return (1);
545}
546
547static uint32_t
548bge_readmem_ind(struct bge_softc *sc, int off)
549{
550 device_t dev;
551 uint32_t val;
552
553 dev = sc->bge_dev;
554
555 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
556 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
557 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
558 return (val);
559}
560
561static void
562bge_writemem_ind(struct bge_softc *sc, int off, int val)
563{
564 device_t dev;
565
566 dev = sc->bge_dev;
567
568 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
569 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
570 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
571}
572
573#ifdef notdef
574static uint32_t
575bge_readreg_ind(struct bge_softc *sc, int off)
576{
577 device_t dev;
578
579 dev = sc->bge_dev;
580
581 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
582 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
583}
584#endif
585
586static void
587bge_writereg_ind(struct bge_softc *sc, int off, int val)
588{
589 device_t dev;
590
591 dev = sc->bge_dev;
592
593 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
594 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
595}
596
597static void
598bge_writemem_direct(struct bge_softc *sc, int off, int val)
599{
600 CSR_WRITE_4(sc, off, val);
601}
602
603static void
604bge_writembx(struct bge_softc *sc, int off, int val)
605{
606 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
607 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
608
609 CSR_WRITE_4(sc, off, val);
610}
611
612/*
613 * Map a single buffer address.
614 */
615
616static void
617bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
618{
619 struct bge_dmamap_arg *ctx;
620
621 if (error)
622 return;
623
624 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
625
626 ctx = arg;
627 ctx->bge_busaddr = segs->ds_addr;
628}
629
630static uint8_t
631bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
632{
633 uint32_t access, byte = 0;
634 int i;
635
636 /* Lock. */
637 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
638 for (i = 0; i < 8000; i++) {
639 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
640 break;
641 DELAY(20);
642 }
643 if (i == 8000)
644 return (1);
645
646 /* Enable access. */
647 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
648 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
649
650 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
651 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
652 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
653 DELAY(10);
654 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
655 DELAY(10);
656 break;
657 }
658 }
659
660 if (i == BGE_TIMEOUT * 10) {
661 if_printf(sc->bge_ifp, "nvram read timed out\n");
662 return (1);
663 }
664
665 /* Get result. */
666 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
667
668 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
669
670 /* Disable access. */
671 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
672
673 /* Unlock. */
674 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
675 CSR_READ_4(sc, BGE_NVRAM_SWARB);
676
677 return (0);
678}
679
680/*
681 * Read a sequence of bytes from NVRAM.
682 */
683static int
684bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
685{
686 int err = 0, i;
687 uint8_t byte = 0;
688
689 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
690 return (1);
691
692 for (i = 0; i < cnt; i++) {
693 err = bge_nvram_getbyte(sc, off + i, &byte);
694 if (err)
695 break;
696 *(dest + i) = byte;
697 }
698
699 return (err ? 1 : 0);
700}
701
702/*
703 * Read a byte of data stored in the EEPROM at address 'addr.' The
704 * BCM570x supports both the traditional bitbang interface and an
705 * auto access interface for reading the EEPROM. We use the auto
706 * access method.
707 */
708static uint8_t
709bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
710{
711 int i;
712 uint32_t byte = 0;
713
714 /*
715 * Enable use of auto EEPROM access so we can avoid
716 * having to use the bitbang method.
717 */
718 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
719
720 /* Reset the EEPROM, load the clock period. */
721 CSR_WRITE_4(sc, BGE_EE_ADDR,
722 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
723 DELAY(20);
724
725 /* Issue the read EEPROM command. */
726 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
727
728 /* Wait for completion */
729 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
730 DELAY(10);
731 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
732 break;
733 }
734
735 if (i == BGE_TIMEOUT * 10) {
736 device_printf(sc->bge_dev, "EEPROM read timed out\n");
737 return (1);
738 }
739
740 /* Get result. */
741 byte = CSR_READ_4(sc, BGE_EE_DATA);
742
743 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
744
745 return (0);
746}
747
748/*
749 * Read a sequence of bytes from the EEPROM.
750 */
751static int
752bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
753{
754 int i, error = 0;
755 uint8_t byte = 0;
756
757 for (i = 0; i < cnt; i++) {
758 error = bge_eeprom_getbyte(sc, off + i, &byte);
759 if (error)
760 break;
761 *(dest + i) = byte;
762 }
763
764 return (error ? 1 : 0);
765}
766
767static int
768bge_miibus_readreg(device_t dev, int phy, int reg)
769{
770 struct bge_softc *sc;
771 uint32_t val, autopoll;
772 int i;
773
774 sc = device_get_softc(dev);
775
776 /*
777 * Broadcom's own driver always assumes the internal
778 * PHY is at GMII address 1. On some chips, the PHY responds
779 * to accesses at all addresses, which could cause us to
780 * bogusly attach the PHY 32 times at probe time. Always
781 * restricting the lookup to address 1 is simpler than
782 * trying to figure out which chip revisions should be
783 * special-cased.
784 */
785 if (phy != 1)
786 return (0);
787
788 /* Reading with autopolling on may trigger PCI errors */
789 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
790 if (autopoll & BGE_MIMODE_AUTOPOLL) {
791 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
792 DELAY(40);
793 }
794
795 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
796 BGE_MIPHY(phy) | BGE_MIREG(reg));
797
798 for (i = 0; i < BGE_TIMEOUT; i++) {
799 DELAY(10);
800 val = CSR_READ_4(sc, BGE_MI_COMM);
801 if (!(val & BGE_MICOMM_BUSY))
802 break;
803 }
804
805 if (i == BGE_TIMEOUT) {
806 device_printf(sc->bge_dev,
807 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
808 phy, reg, val);
809 val = 0;
810 goto done;
811 }
812
813 DELAY(5);
814 val = CSR_READ_4(sc, BGE_MI_COMM);
815
816done:
817 if (autopoll & BGE_MIMODE_AUTOPOLL) {
818 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
819 DELAY(40);
820 }
821
822 if (val & BGE_MICOMM_READFAIL)
823 return (0);
824
825 return (val & 0xFFFF);
826}
827
828static int
829bge_miibus_writereg(device_t dev, int phy, int reg, int val)
830{
831 struct bge_softc *sc;
832 uint32_t autopoll;
833 int i;
834
835 sc = device_get_softc(dev);
836
837 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
838 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
839 return (0);
840
841 /* Reading with autopolling on may trigger PCI errors */
842 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
843 if (autopoll & BGE_MIMODE_AUTOPOLL) {
844 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
845 DELAY(40);
846 }
847
848 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
849 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
850
851 for (i = 0; i < BGE_TIMEOUT; i++) {
852 DELAY(10);
853 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
854 DELAY(5);
855 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
856 break;
857 }
858 }
859
860 if (i == BGE_TIMEOUT) {
861 device_printf(sc->bge_dev,
862 "PHY write timed out (phy %d, reg %d, val %d)\n",
863 phy, reg, val);
864 return (0);
865 }
866
867 if (autopoll & BGE_MIMODE_AUTOPOLL) {
868 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
869 DELAY(40);
870 }
871
872 return (0);
873}
874
875static void
876bge_miibus_statchg(device_t dev)
877{
878 struct bge_softc *sc;
879 struct mii_data *mii;
880 sc = device_get_softc(dev);
881 mii = device_get_softc(sc->bge_miibus);
882
883 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
884 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
885 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
886 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
887 else
888 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
889
890 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
891 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
892 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FLAG1)
893 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
894 else
895 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
896 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FLAG0)
897 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
898 else
899 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
900 } else {
901 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
902 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
903 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
904 }
905}
906
907/*
908 * Initialize a standard receive ring descriptor.
909 */
910static int
911bge_newbuf_std(struct bge_softc *sc, int i)
912{
913 struct mbuf *m;
914 struct bge_rx_bd *r;
915 bus_dma_segment_t segs[1];
916 bus_dmamap_t map;
917 int error, nsegs;
918
919 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
920 if (m == NULL)
921 return (ENOBUFS);
922 m->m_len = m->m_pkthdr.len = MCLBYTES;
923 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
924 m_adj(m, ETHER_ALIGN);
925
926 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
927 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
928 if (error != 0) {
929 m_freem(m);
930 return (error);
931 }
932 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
933 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
934 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
935 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
936 sc->bge_cdata.bge_rx_std_dmamap[i]);
937 }
938 map = sc->bge_cdata.bge_rx_std_dmamap[i];
939 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
940 sc->bge_cdata.bge_rx_std_sparemap = map;
941 sc->bge_cdata.bge_rx_std_chain[i] = m;
942 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
943 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
944 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
945 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
946 r->bge_flags = BGE_RXBDFLAG_END;
947 r->bge_len = segs[0].ds_len;
948 r->bge_idx = i;
949
950 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
951 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
952
953 return (0);
954}
955
956/*
957 * Initialize a jumbo receive ring descriptor. This allocates
958 * a jumbo buffer from the pool managed internally by the driver.
959 */
960static int
961bge_newbuf_jumbo(struct bge_softc *sc, int i)
962{
963 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
964 bus_dmamap_t map;
965 struct bge_extrx_bd *r;
966 struct mbuf *m;
967 int error, nsegs;
968
969 MGETHDR(m, M_DONTWAIT, MT_DATA);
970 if (m == NULL)
971 return (ENOBUFS);
972
973 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
974 if (!(m->m_flags & M_EXT)) {
975 m_freem(m);
976 return (ENOBUFS);
977 }
978 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
979 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
980 m_adj(m, ETHER_ALIGN);
981
982 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
983 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
984 if (error != 0) {
985 m_freem(m);
986 return (error);
987 }
988
989 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
990 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
991 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
992 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
993 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
994 }
995 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
996 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
997 sc->bge_cdata.bge_rx_jumbo_sparemap;
998 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
999 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1000 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1001 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1002 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1003 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1004
1005 /*
1006 * Fill in the extended RX buffer descriptor.
1007 */
1008 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1009 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1010 r->bge_idx = i;
1011 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1012 switch (nsegs) {
1013 case 4:
1014 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1015 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1016 r->bge_len3 = segs[3].ds_len;
1017 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1018 case 3:
1019 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1020 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1021 r->bge_len2 = segs[2].ds_len;
1022 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1023 case 2:
1024 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1025 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1026 r->bge_len1 = segs[1].ds_len;
1027 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1028 case 1:
1029 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1030 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1031 r->bge_len0 = segs[0].ds_len;
1032 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1033 break;
1034 default:
1035 panic("%s: %d segments\n", __func__, nsegs);
1036 }
1037
1038 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1039 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1040
1041 return (0);
1042}
1043
1044static int
1045bge_init_rx_ring_std(struct bge_softc *sc)
1046{
1047 int error, i;
1048
1049 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1050 sc->bge_std = 0;
1051 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1052 if ((error = bge_newbuf_std(sc, i)) != 0)
1053 return (error);
1054 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1055 }
1056
1057 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1058 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1059
1060 sc->bge_std = 0;
1061 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1062
1063 return (0);
1064}
1065
1066static void
1067bge_free_rx_ring_std(struct bge_softc *sc)
1068{
1069 int i;
1070
1071 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1072 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1073 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1074 sc->bge_cdata.bge_rx_std_dmamap[i],
1075 BUS_DMASYNC_POSTREAD);
1076 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1077 sc->bge_cdata.bge_rx_std_dmamap[i]);
1078 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1079 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1080 }
1081 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1082 sizeof(struct bge_rx_bd));
1083 }
1084}
1085
1086static int
1087bge_init_rx_ring_jumbo(struct bge_softc *sc)
1088{
1089 struct bge_rcb *rcb;
1090 int error, i;
1091
1092 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1093 sc->bge_jumbo = 0;
1094 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1095 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1096 return (error);
1097 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1098 }
1099
1100 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1101 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1102
1103 sc->bge_jumbo = 0;
1104
1105 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1106 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1107 BGE_RCB_FLAG_USE_EXT_RX_BD);
1108 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1109
1110 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1111
1112 return (0);
1113}
1114
1115static void
1116bge_free_rx_ring_jumbo(struct bge_softc *sc)
1117{
1118 int i;
1119
1120 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1121 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1122 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1123 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1124 BUS_DMASYNC_POSTREAD);
1125 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1126 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1127 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1128 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1129 }
1130 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1131 sizeof(struct bge_extrx_bd));
1132 }
1133}
1134
1135static void
1136bge_free_tx_ring(struct bge_softc *sc)
1137{
1138 int i;
1139
1140 if (sc->bge_ldata.bge_tx_ring == NULL)
1141 return;
1142
1143 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1144 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1145 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1146 sc->bge_cdata.bge_tx_dmamap[i],
1147 BUS_DMASYNC_POSTWRITE);
1148 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1149 sc->bge_cdata.bge_tx_dmamap[i]);
1150 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1151 sc->bge_cdata.bge_tx_chain[i] = NULL;
1152 }
1153 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1154 sizeof(struct bge_tx_bd));
1155 }
1156}
1157
1158static int
1159bge_init_tx_ring(struct bge_softc *sc)
1160{
1161 sc->bge_txcnt = 0;
1162 sc->bge_tx_saved_considx = 0;
1163
1164 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1165 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1166 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1167
1168 /* Initialize transmit producer index for host-memory send ring. */
1169 sc->bge_tx_prodidx = 0;
1170 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1171
1172 /* 5700 b2 errata */
1173 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1174 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1175
1176 /* NIC-memory send ring not used; initialize to zero. */
1177 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1178 /* 5700 b2 errata */
1179 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1180 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1181
1182 return (0);
1183}
1184
1185static void
1186bge_setpromisc(struct bge_softc *sc)
1187{
1188 struct ifnet *ifp;
1189
1190 BGE_LOCK_ASSERT(sc);
1191
1192 ifp = sc->bge_ifp;
1193
1194 /* Enable or disable promiscuous mode as needed. */
1195 if (ifp->if_flags & IFF_PROMISC)
1196 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1197 else
1198 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1199}
1200
1201static void
1202bge_setmulti(struct bge_softc *sc)
1203{
1204 struct ifnet *ifp;
1205 struct ifmultiaddr *ifma;
1206 uint32_t hashes[4] = { 0, 0, 0, 0 };
1207 int h, i;
1208
1209 BGE_LOCK_ASSERT(sc);
1210
1211 ifp = sc->bge_ifp;
1212
1213 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1214 for (i = 0; i < 4; i++)
1215 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1216 return;
1217 }
1218
1219 /* First, zot all the existing filters. */
1220 for (i = 0; i < 4; i++)
1221 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1222
1223 /* Now program new ones. */
1224 if_maddr_rlock(ifp);
1225 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1226 if (ifma->ifma_addr->sa_family != AF_LINK)
1227 continue;
1228 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1229 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1230 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1231 }
1232 if_maddr_runlock(ifp);
1233
1234 for (i = 0; i < 4; i++)
1235 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1236}
1237
1238static void
1239bge_setvlan(struct bge_softc *sc)
1240{
1241 struct ifnet *ifp;
1242
1243 BGE_LOCK_ASSERT(sc);
1244
1245 ifp = sc->bge_ifp;
1246
1247 /* Enable or disable VLAN tag stripping as needed. */
1248 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1249 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1250 else
1251 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1252}
1253
1254static void
1255bge_sig_pre_reset(struct bge_softc *sc, int type)
1256{
1257
1258 /*
1259 * Some chips don't like this so only do this if ASF is enabled
1260 */
1261 if (sc->bge_asf_mode)
1262 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1263
1264 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1265 switch (type) {
1266 case BGE_RESET_START:
1267 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1268 break;
1269 case BGE_RESET_STOP:
1270 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1271 break;
1272 }
1273 }
1274}
1275
1276static void
1277bge_sig_post_reset(struct bge_softc *sc, int type)
1278{
1279
1280 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1281 switch (type) {
1282 case BGE_RESET_START:
1283 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1284 /* START DONE */
1285 break;
1286 case BGE_RESET_STOP:
1287 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1288 break;
1289 }
1290 }
1291}
1292
1293static void
1294bge_sig_legacy(struct bge_softc *sc, int type)
1295{
1296
1297 if (sc->bge_asf_mode) {
1298 switch (type) {
1299 case BGE_RESET_START:
1300 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1301 break;
1302 case BGE_RESET_STOP:
1303 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1304 break;
1305 }
1306 }
1307}
1308
1309static void
1310bge_stop_fw(struct bge_softc *sc)
1311{
1312 int i;
1313
1314 if (sc->bge_asf_mode) {
1315 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1316 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1317 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1318
1319 for (i = 0; i < 100; i++ ) {
1320 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1321 break;
1322 DELAY(10);
1323 }
1324 }
1325}
1326
1327/*
1328 * Do endian, PCI and DMA initialization.
1329 */
1330static int
1331bge_chipinit(struct bge_softc *sc)
1332{
1333 uint32_t dma_rw_ctl;
1334 uint16_t val;
1335 int i;
1336
1337 /* Set endianness before we access any non-PCI registers. */
1338 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1339
1340 /* Clear the MAC control register */
1341 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1342
1343 /*
1344 * Clear the MAC statistics block in the NIC's
1345 * internal memory.
1346 */
1347 for (i = BGE_STATS_BLOCK;
1348 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1349 BGE_MEMWIN_WRITE(sc, i, 0);
1350
1351 for (i = BGE_STATUS_BLOCK;
1352 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1353 BGE_MEMWIN_WRITE(sc, i, 0);
1354
1355 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1356 /*
1357 * Fix data corruption caused by non-qword write with WB.
1358 * Fix master abort in PCI mode.
1359 * Fix PCI latency timer.
1360 */
1361 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1362 val |= (1 << 10) | (1 << 12) | (1 << 13);
1363 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1364 }
1365
1366 /*
1367 * Set up the PCI DMA control register.
1368 */
1369 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1370 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1371 if (sc->bge_flags & BGE_FLAG_PCIE) {
1372 /* Read watermark not used, 128 bytes for write. */
1373 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1374 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1375 if (BGE_IS_5714_FAMILY(sc)) {
1376 /* 256 bytes for read and write. */
1377 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1378 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1379 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1380 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1381 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1382 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1383 /*
1384 * In the BCM5703, the DMA read watermark should
1385 * be set to less than or equal to the maximum
1386 * memory read byte count of the PCI-X command
1387 * register.
1388 */
1389 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1390 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1391 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1392 /* 1536 bytes for read, 384 bytes for write. */
1393 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1394 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1395 } else {
1396 /* 384 bytes for read and write. */
1397 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1398 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1399 0x0F;
1400 }
1401 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1402 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1403 uint32_t tmp;
1404
1405 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1406 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1407 if (tmp == 6 || tmp == 7)
1408 dma_rw_ctl |=
1409 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1410
1411 /* Set PCI-X DMA write workaround. */
1412 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1413 }
1414 } else {
1415 /* Conventional PCI bus: 256 bytes for read and write. */
1416 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1417 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1418
1419 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1420 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1421 dma_rw_ctl |= 0x0F;
1422 }
1423 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1424 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1425 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1426 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1427 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1428 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1429 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1430 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1431
1432 /*
1433 * Set up general mode register.
1434 */
1435 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1436 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1437 BGE_MODECTL_TX_NO_PHDR_CSUM);
1438
1439 /*
1440 * BCM5701 B5 has a bug causing data corruption when using
1441 * 64-bit DMA reads, which can be terminated early and then
1442 * completed later as 32-bit accesses, in combination with
1443 * certain bridges.
1444 */
1445 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1446 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1447 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1448
1449 /*
1450 * Tell the firmware the driver is running
1451 */
1452 if (sc->bge_asf_mode & ASF_STACKUP)
1453 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1454
1455 /*
1456 * Disable memory write invalidate. Apparently it is not supported
1457 * properly by these devices. Also ensure that INTx isn't disabled,
1458 * as these chips need it even when using MSI.
1459 */
1460 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1461 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1462
1463 /* Set the timer prescaler (always 66MHz) */
1464 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1465
1466 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1467 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1468 DELAY(40); /* XXX */
1469
1470 /* Put PHY into ready state */
1471 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1472 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1473 DELAY(40);
1474 }
1475
1476 return (0);
1477}
1478
1479static int
1480bge_blockinit(struct bge_softc *sc)
1481{
1482 struct bge_rcb *rcb;
1483 bus_size_t vrcb;
1484 bge_hostaddr taddr;
1485 uint32_t val;
1486 int i;
1487
1488 /*
1489 * Initialize the memory window pointer register so that
1490 * we can access the first 32K of internal NIC RAM. This will
1491 * allow us to set up the TX send ring RCBs and the RX return
1492 * ring RCBs, plus other things which live in NIC memory.
1493 */
1494 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1495
1496 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1497
1498 if (!(BGE_IS_5705_PLUS(sc))) {
1499 /* Configure mbuf memory pool */
1500 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1501 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1502 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1503 else
1504 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1505
1506 /* Configure DMA resource pool */
1507 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1508 BGE_DMA_DESCRIPTORS);
1509 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1510 }
1511
1512 /* Configure mbuf pool watermarks */
1513 if (!BGE_IS_5705_PLUS(sc)) {
1514 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1515 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1516 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1517 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1518 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1519 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1520 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1521 } else {
1522 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1523 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1524 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1525 }
1526
1527 /* Configure DMA resource watermarks */
1528 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1529 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1530
1531 /* Enable buffer manager */
1532 if (!(BGE_IS_5705_PLUS(sc))) {
1533 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1534 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1535
1536 /* Poll for buffer manager start indication */
1537 for (i = 0; i < BGE_TIMEOUT; i++) {
1538 DELAY(10);
1539 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1540 break;
1541 }
1542
1543 if (i == BGE_TIMEOUT) {
1544 device_printf(sc->bge_dev,
1545 "buffer manager failed to start\n");
1546 return (ENXIO);
1547 }
1548 }
1549
1550 /* Enable flow-through queues */
1551 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1552 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1553
1554 /* Wait until queue initialization is complete */
1555 for (i = 0; i < BGE_TIMEOUT; i++) {
1556 DELAY(10);
1557 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1558 break;
1559 }
1560
1561 if (i == BGE_TIMEOUT) {
1562 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1563 return (ENXIO);
1564 }
1565
1566 /* Initialize the standard RX ring control block */
1567 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1568 rcb->bge_hostaddr.bge_addr_lo =
1569 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1570 rcb->bge_hostaddr.bge_addr_hi =
1571 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1572 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1573 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1574 if (BGE_IS_5705_PLUS(sc))
1575 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1576 else
1577 rcb->bge_maxlen_flags =
1578 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1579 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1580 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1581 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1582
1583 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1584 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1585
1586 /*
1587 * Initialize the jumbo RX ring control block
1588 * We set the 'ring disabled' bit in the flags
1589 * field until we're actually ready to start
1590 * using this ring (i.e. once we set the MTU
1591 * high enough to require it).
1592 */
1593 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1594 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1595
1596 rcb->bge_hostaddr.bge_addr_lo =
1597 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1598 rcb->bge_hostaddr.bge_addr_hi =
1599 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1600 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1601 sc->bge_cdata.bge_rx_jumbo_ring_map,
1602 BUS_DMASYNC_PREREAD);
1603 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1604 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1605 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1606 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1607 rcb->bge_hostaddr.bge_addr_hi);
1608 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1609 rcb->bge_hostaddr.bge_addr_lo);
1610
1611 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1612 rcb->bge_maxlen_flags);
1613 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1614
1615 /* Set up dummy disabled mini ring RCB */
1616 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1617 rcb->bge_maxlen_flags =
1618 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1619 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1620 rcb->bge_maxlen_flags);
1621 }
1622
1623 /*
1624	 * Set the BD ring replenish thresholds. The recommended
1625 * values are 1/8th the number of descriptors allocated to
1626 * each ring.
1627 * XXX The 5754 requires a lower threshold, so it might be a
1628 * requirement of all 575x family chips. The Linux driver sets
1629 * the lower threshold for all 5705 family chips as well, but there
1630 * are reports that it might not need to be so strict.
1631 *
1632 * XXX Linux does some extra fiddling here for the 5906 parts as
1633 * well.
1634 */
1635 if (BGE_IS_5705_PLUS(sc))
1636 val = 8;
1637 else
1638 val = BGE_STD_RX_RING_CNT / 8;
1639 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1640 if (BGE_IS_JUMBO_CAPABLE(sc))
1641 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1642 BGE_JUMBO_RX_RING_CNT/8);
1643
1644 /*
1645 * Disable all unused send rings by setting the 'ring disabled'
1646 * bit in the flags field of all the TX send ring control blocks.
1647 * These are located in NIC memory.
1648 */
1649 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1650 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1651 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1652 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1653 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1654 vrcb += sizeof(struct bge_rcb);
1655 }
1656
1657 /* Configure TX RCB 0 (we use only the first ring) */
1658 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1659 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1660 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1661 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1662 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1663 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1664 if (!(BGE_IS_5705_PLUS(sc)))
1665 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1666 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1667
1668 /* Disable all unused RX return rings */
1669 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1670 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1671 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1672 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1673 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1674 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1675 BGE_RCB_FLAG_RING_DISABLED));
1676 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1677 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1678 (i * (sizeof(uint64_t))), 0);
1679 vrcb += sizeof(struct bge_rcb);
1680 }
1681
1682 /* Initialize RX ring indexes */
1683 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1684 if (BGE_IS_JUMBO_CAPABLE(sc))
1685 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1686 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1687 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1688
1689 /*
1690 * Set up RX return ring 0
1691 * Note that the NIC address for RX return rings is 0x00000000.
1692 * The return rings live entirely within the host, so the
1693 * nicaddr field in the RCB isn't used.
1694 */
1695 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1696 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1697 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1698 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1699 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1700 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1701 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1702
1703 /* Set random backoff seed for TX */
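	/*
	 * The seed is derived from the station address: the six MAC
	 * address bytes are summed, plus BGE_TX_BACKOFF_SEED_MASK.
	 */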
1704 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1705 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1706 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1707 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1708 BGE_TX_BACKOFF_SEED_MASK);
1709
1710 /* Set inter-packet gap */
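	/*
	 * 0x2620 apparently encodes IPG CRS 2, IPG 6 and a slot time of
	 * 0x20 bit times, matching the defaults the Linux tg3 driver
	 * uses for this register.
	 */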
1711 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1712
1713 /*
1714 * Specify which ring to use for packets that don't match
1715 * any RX rules.
1716 */
1717 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1718
1719 /*
1720 * Configure number of RX lists. One interrupt distribution
1721 * list, sixteen active lists, one bad frames class.
1722 */
1723 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1724
1725	/* Initialize RX list placement stats mask. */
1726 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1727 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1728
1729 /* Disable host coalescing until we get it set up */
1730 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1731
1732 /* Poll to make sure it's shut down. */
1733 for (i = 0; i < BGE_TIMEOUT; i++) {
1734 DELAY(10);
1735 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1736 break;
1737 }
1738
1739 if (i == BGE_TIMEOUT) {
1740 device_printf(sc->bge_dev,
1741 "host coalescing engine failed to idle\n");
1742 return (ENXIO);
1743 }
1744
1745 /* Set up host coalescing defaults */
1746 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1747 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1748 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1749 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1750 if (!(BGE_IS_5705_PLUS(sc))) {
1751 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1752 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1753 }
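	/*
	 * While an interrupt is being serviced, update the status block
	 * after at most one RX/TX BD has been coalesced.
	 */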
1754 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1755 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1756
1757 /* Set up address of statistics block */
1758 if (!(BGE_IS_5705_PLUS(sc))) {
1759 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1760 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1761 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1762 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1763 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1764 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1765 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1766 }
1767
1768 /* Set up address of status block */
1769 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1770 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1771 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1772 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1773
1774 /* Set up status block size. */
1775 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1776 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1777 val = BGE_STATBLKSZ_FULL;
1778 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1779 } else {
1780 val = BGE_STATBLKSZ_32BYTE;
1781 bzero(sc->bge_ldata.bge_status_block, 32);
1782 }
1783 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1784 sc->bge_cdata.bge_status_map,
1785 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1786
1787 /* Turn on host coalescing state machine */
1788 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1789
1790 /* Turn on RX BD completion state machine and enable attentions */
1791 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1792 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1793
1794 /* Turn on RX list placement state machine */
1795 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1796
1797 /* Turn on RX list selector state machine. */
1798 if (!(BGE_IS_5705_PLUS(sc)))
1799 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1800
1801 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1802 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1803 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1804 BGE_MACMODE_FRMHDR_DMA_ENB;
1805
1806 if (sc->bge_flags & BGE_FLAG_TBI)
1807 val |= BGE_PORTMODE_TBI;
1808 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1809 val |= BGE_PORTMODE_GMII;
1810 else
1811 val |= BGE_PORTMODE_MII;
1812
1813 /* Turn on DMA, clear stats */
1814 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1815
1816 /* Set misc. local control, enable interrupts on attentions */
1817 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1818
1819#ifdef notdef
1820 /* Assert GPIO pins for PHY reset */
1821 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1822 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1823 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1824 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1825#endif
1826
1827 /* Turn on DMA completion state machine */
1828 if (!(BGE_IS_5705_PLUS(sc)))
1829 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1830
1831 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1832
1833 /* Enable host coalescing bug fix. */
1834 if (BGE_IS_5755_PLUS(sc))
1835 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1836
1837 /* Request larger DMA burst size to get better performance. */
1838 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
1839 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1840
1841 /* Turn on write DMA state machine */
1842 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1843 DELAY(40);
1844
1845 /* Turn on read DMA state machine */
1846 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1847 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1848 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1849 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1850 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1851 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1852 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1853 if (sc->bge_flags & BGE_FLAG_PCIE)
1854 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1855 if (sc->bge_flags & BGE_FLAG_TSO) {
1856 val |= BGE_RDMAMODE_TSO4_ENABLE;
1857 if (sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1858 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1859 val |= BGE_RDMAMODE_TSO6_ENABLE;
1860 }
1861 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
1862 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1863 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1864 sc->bge_asicrev == BGE_ASICREV_BCM57780) {
1865 /*
1866 * Enable fix for read DMA FIFO overruns.
1867		 * The fix limits the number of RX BDs
1868		 * the hardware fetches at a time.
1869 */
1870 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
1871 CSR_READ_4(sc, BGE_RDMA_RSRVCTRL) |
1872 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1873 }
1861 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1862 DELAY(40);
1863
1864 /* Turn on RX data completion state machine */
1865 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1866
1867 /* Turn on RX BD initiator state machine */
1868 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1869
1870 /* Turn on RX data and RX BD initiator state machine */
1871 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1872
1873 /* Turn on Mbuf cluster free state machine */
1874 if (!(BGE_IS_5705_PLUS(sc)))
1875 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1876
1877 /* Turn on send BD completion state machine */
1878 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1879
1880 /* Turn on send data completion state machine */
1881 val = BGE_SDCMODE_ENABLE;
1882 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
1883 val |= BGE_SDCMODE_CDELAY;
1884 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1885
1886 /* Turn on send data initiator state machine */
1887 if (sc->bge_flags & BGE_FLAG_TSO)
1888 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
1889 else
1890 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1891
1892 /* Turn on send BD initiator state machine */
1893 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1894
1895 /* Turn on send BD selector state machine */
1896 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1897
1898 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1899 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1900 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
1901
1902 /* ack/clear link change events */
1903 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1904 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1905 BGE_MACSTAT_LINK_CHANGED);
1906 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1907
1908 /* Enable PHY auto polling (for MII/GMII only) */
1909 if (sc->bge_flags & BGE_FLAG_TBI) {
1910 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1911 } else {
1912 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
1913 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1914 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1915 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1916 BGE_EVTENB_MI_INTERRUPT);
1917 }
1918
1919 /*
1920 * Clear any pending link state attention.
1921 * Otherwise some link state change events may be lost until attention
1922 * is cleared by bge_intr() -> bge_link_upd() sequence.
1923 * It's not necessary on newer BCM chips - perhaps enabling link
1924 * state change attentions implies clearing pending attention.
1925 */
1926 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1927 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1928 BGE_MACSTAT_LINK_CHANGED);
1929
1930 /* Enable link state change attentions. */
1931 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1932
1933 return (0);
1934}
1935
1936const struct bge_revision *
1937bge_lookup_rev(uint32_t chipid)
1938{
1939 const struct bge_revision *br;
1940
1941 for (br = bge_revisions; br->br_name != NULL; br++) {
1942 if (br->br_chipid == chipid)
1943 return (br);
1944 }
1945
1946 for (br = bge_majorrevs; br->br_name != NULL; br++) {
1947 if (br->br_chipid == BGE_ASICREV(chipid))
1948 return (br);
1949 }
1950
1951 return (NULL);
1952}
1953
1954const struct bge_vendor *
1955bge_lookup_vendor(uint16_t vid)
1956{
1957 const struct bge_vendor *v;
1958
1959 for (v = bge_vendors; v->v_name != NULL; v++)
1960 if (v->v_id == vid)
1961 return (v);
1962
1963 panic("%s: unknown vendor %d", __func__, vid);
1964 return (NULL);
1965}
1966
1967/*
1968 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1969 * against our list and return its name if we find a match.
1970 *
1971 * Note that since the Broadcom controller contains VPD support, we
1972 * try to get the device name string from the controller itself instead
1973 * of the compiled-in string. This guarantees we'll always announce the
1974 * right product name. We fall back to the compiled-in string when
1975 * VPD is unavailable or corrupt.
1976 */
1977static int
1978bge_probe(device_t dev)
1979{
1980 const struct bge_type *t = bge_devs;
1981 struct bge_softc *sc = device_get_softc(dev);
1982 uint16_t vid, did;
1983
1984 sc->bge_dev = dev;
1985 vid = pci_get_vendor(dev);
1986 did = pci_get_device(dev);
1987	while (t->bge_vid != 0) {
1988 if ((vid == t->bge_vid) && (did == t->bge_did)) {
1989 char model[64], buf[96];
1990 const struct bge_revision *br;
1991 const struct bge_vendor *v;
1992 uint32_t id;
1993
1994 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1995 BGE_PCIMISCCTL_ASICREV_SHIFT;
1996 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG)
1997 id = pci_read_config(dev,
1998 BGE_PCI_PRODID_ASICREV, 4);
1999 br = bge_lookup_rev(id);
2000 v = bge_lookup_vendor(vid);
2001 {
2002#if __FreeBSD_version > 700024
2003 const char *pname;
2004
2005 if (bge_has_eaddr(sc) &&
2006 pci_get_vpd_ident(dev, &pname) == 0)
2007 snprintf(model, 64, "%s", pname);
2008 else
2009#endif
2010 snprintf(model, 64, "%s %s",
2011 v->v_name,
2012 br != NULL ? br->br_name :
2013 "NetXtreme Ethernet Controller");
2014 }
2015 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2016 br != NULL ? "" : "unknown ", id);
2017 device_set_desc_copy(dev, buf);
2018 return (0);
2019 }
2020 t++;
2021 }
2022
2023 return (ENXIO);
2024}
2025
2026static void
2027bge_dma_free(struct bge_softc *sc)
2028{
2029 int i;
2030
2031 /* Destroy DMA maps for RX buffers. */
2032 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2033 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2034 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2035 sc->bge_cdata.bge_rx_std_dmamap[i]);
2036 }
2037 if (sc->bge_cdata.bge_rx_std_sparemap)
2038 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2039 sc->bge_cdata.bge_rx_std_sparemap);
2040
2041 /* Destroy DMA maps for jumbo RX buffers. */
2042 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2043 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2044 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2045 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2046 }
2047 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2048 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2049 sc->bge_cdata.bge_rx_jumbo_sparemap);
2050
2051 /* Destroy DMA maps for TX buffers. */
2052 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2053 if (sc->bge_cdata.bge_tx_dmamap[i])
2054 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2055 sc->bge_cdata.bge_tx_dmamap[i]);
2056 }
2057
2058 if (sc->bge_cdata.bge_rx_mtag)
2059 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2060 if (sc->bge_cdata.bge_tx_mtag)
2061 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2062
2064 /* Destroy standard RX ring. */
2065 if (sc->bge_cdata.bge_rx_std_ring_map)
2066 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2067 sc->bge_cdata.bge_rx_std_ring_map);
2068 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2069 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2070 sc->bge_ldata.bge_rx_std_ring,
2071 sc->bge_cdata.bge_rx_std_ring_map);
2072
2073 if (sc->bge_cdata.bge_rx_std_ring_tag)
2074 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2075
2076 /* Destroy jumbo RX ring. */
2077 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2078 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2079 sc->bge_cdata.bge_rx_jumbo_ring_map);
2080
2081 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2082 sc->bge_ldata.bge_rx_jumbo_ring)
2083 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2084 sc->bge_ldata.bge_rx_jumbo_ring,
2085 sc->bge_cdata.bge_rx_jumbo_ring_map);
2086
2087 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2088 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2089
2090 /* Destroy RX return ring. */
2091 if (sc->bge_cdata.bge_rx_return_ring_map)
2092 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2093 sc->bge_cdata.bge_rx_return_ring_map);
2094
2095 if (sc->bge_cdata.bge_rx_return_ring_map &&
2096 sc->bge_ldata.bge_rx_return_ring)
2097 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2098 sc->bge_ldata.bge_rx_return_ring,
2099 sc->bge_cdata.bge_rx_return_ring_map);
2100
2101 if (sc->bge_cdata.bge_rx_return_ring_tag)
2102 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2103
2104 /* Destroy TX ring. */
2105 if (sc->bge_cdata.bge_tx_ring_map)
2106 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2107 sc->bge_cdata.bge_tx_ring_map);
2108
2109 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2110 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2111 sc->bge_ldata.bge_tx_ring,
2112 sc->bge_cdata.bge_tx_ring_map);
2113
2114 if (sc->bge_cdata.bge_tx_ring_tag)
2115 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2116
2117 /* Destroy status block. */
2118 if (sc->bge_cdata.bge_status_map)
2119 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2120 sc->bge_cdata.bge_status_map);
2121
2122 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2123 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2124 sc->bge_ldata.bge_status_block,
2125 sc->bge_cdata.bge_status_map);
2126
2127 if (sc->bge_cdata.bge_status_tag)
2128 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2129
2130 /* Destroy statistics block. */
2131 if (sc->bge_cdata.bge_stats_map)
2132 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2133 sc->bge_cdata.bge_stats_map);
2134
2135 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2136 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2137 sc->bge_ldata.bge_stats,
2138 sc->bge_cdata.bge_stats_map);
2139
2140 if (sc->bge_cdata.bge_stats_tag)
2141 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2142
2143 if (sc->bge_cdata.bge_buffer_tag)
2144 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2145
2146 /* Destroy the parent tag. */
2147 if (sc->bge_cdata.bge_parent_tag)
2148 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2149}
2150
2151static int
2152bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2153 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2154 bus_addr_t *paddr, const char *msg)
2155{
2156 struct bge_dmamap_arg ctx;
2157 bus_addr_t lowaddr;
2158 bus_size_t ring_end;
2159 int error;
2160
2161 lowaddr = BUS_SPACE_MAXADDR;
2162again:
2163 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2164 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2165 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2166 if (error != 0) {
2167 device_printf(sc->bge_dev,
2168 "could not create %s dma tag\n", msg);
2169 return (ENOMEM);
2170 }
2171 /* Allocate DMA'able memory for ring. */
2172 error = bus_dmamem_alloc(*tag, (void **)ring,
2173 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2174 if (error != 0) {
2175 device_printf(sc->bge_dev,
2176 "could not allocate DMA'able memory for %s\n", msg);
2177 return (ENOMEM);
2178 }
2179 /* Load the address of the ring. */
2180 ctx.bge_busaddr = 0;
2181 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2182 &ctx, BUS_DMA_NOWAIT);
2183 if (error != 0) {
2184 device_printf(sc->bge_dev,
2185 "could not load DMA'able memory for %s\n", msg);
2186 return (ENOMEM);
2187 }
2188 *paddr = ctx.bge_busaddr;
2189 ring_end = *paddr + maxsize;
2190 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2191 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2192 /*
2193 * 4GB boundary crossed. Limit maximum allowable DMA
2194 * address space to 32bit and try again.
2195 */
2196 bus_dmamap_unload(*tag, *map);
2197 bus_dmamem_free(*tag, *ring, *map);
2198 bus_dma_tag_destroy(*tag);
2199 if (bootverbose)
2200 device_printf(sc->bge_dev, "4GB boundary crossed, "
2201 "limit DMA address space to 32bit for %s\n", msg);
2202 *ring = NULL;
2203 *tag = NULL;
2204 *map = NULL;
2205 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2206 goto again;
2207 }
2208 return (0);
2209}
2210
2211static int
2212bge_dma_alloc(struct bge_softc *sc)
2213{
2214 bus_addr_t lowaddr;
2215 bus_size_t boundary, sbsz, txsegsz, txmaxsegsz;
2216 int i, error;
2217
2218 lowaddr = BUS_SPACE_MAXADDR;
2219 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2220 lowaddr = BGE_DMA_MAXADDR;
2221 /*
2222 * Allocate the parent bus DMA tag appropriate for PCI.
2223 */
2224 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2225 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2226 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2227 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2228 if (error != 0) {
2229 device_printf(sc->bge_dev,
2230 "could not allocate parent dma tag\n");
2231 return (ENOMEM);
2232 }
2233
2234 /* Create tag for standard RX ring. */
2235 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2236 &sc->bge_cdata.bge_rx_std_ring_tag,
2237 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2238 &sc->bge_cdata.bge_rx_std_ring_map,
2239 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2240 if (error)
2241 return (error);
2242
2243 /* Create tag for RX return ring. */
2244 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2245 &sc->bge_cdata.bge_rx_return_ring_tag,
2246 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2247 &sc->bge_cdata.bge_rx_return_ring_map,
2248 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2249 if (error)
2250 return (error);
2251
2252 /* Create tag for TX ring. */
2253 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2254 &sc->bge_cdata.bge_tx_ring_tag,
2255 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2256 &sc->bge_cdata.bge_tx_ring_map,
2257 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2258 if (error)
2259 return (error);
2260
2261 /*
2262 * Create tag for status block.
2263	 * Because we use only a single TX/RX/RX return ring, use the
2264	 * minimum status block size, except on BCM5700 AX/BX, which
2265	 * seems to want to see the full status block size regardless
2266	 * of the configured number of rings.
2267 */
2268 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2269 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2270 sbsz = BGE_STATUS_BLK_SZ;
2271 else
2272 sbsz = 32;
2273 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2274 &sc->bge_cdata.bge_status_tag,
2275 (uint8_t **)&sc->bge_ldata.bge_status_block,
2276 &sc->bge_cdata.bge_status_map,
2277 &sc->bge_ldata.bge_status_block_paddr, "status block");
2278 if (error)
2279 return (error);
2280
2281 /* Create tag for statistics block. */
2282 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2283 &sc->bge_cdata.bge_stats_tag,
2284 (uint8_t **)&sc->bge_ldata.bge_stats,
2285 &sc->bge_cdata.bge_stats_map,
2286 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2287 if (error)
2288 return (error);
2289
2290 /* Create tag for jumbo RX ring. */
2291 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2292 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2293 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2294 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2295 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2296 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2297 if (error)
2298 return (error);
2299 }
2300
2301 /* Create parent tag for buffers. */
2302 boundary = 0;
2303 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0)
2304 boundary = BGE_DMA_BNDRY;
2305 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2306 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2307 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2308 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2309 if (error != 0) {
2310 device_printf(sc->bge_dev,
2311 "could not allocate buffer dma tag\n");
2312 return (ENOMEM);
2313 }
2314 /* Create tag for Tx mbufs. */
2315 if (sc->bge_flags & BGE_FLAG_TSO) {
2316 txsegsz = BGE_TSOSEG_SZ;
2317 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2318 } else {
2319 txsegsz = MCLBYTES;
2320 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2321 }
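	/*
	 * With TSO a single transmit may carry up to a 64KB TCP payload
	 * plus the VLAN header; without TSO, transmits are bounded by
	 * BGE_NSEG_NEW cluster-sized segments.
	 */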
2322 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2323 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2324 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2325 &sc->bge_cdata.bge_tx_mtag);
2326
2327 if (error) {
2328 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2329 return (ENOMEM);
2330 }
2331
2332 /* Create tag for Rx mbufs. */
2333 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2334 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
2335 MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2336
2337 if (error) {
2338 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2339 return (ENOMEM);
2340 }
2341
2342 /* Create DMA maps for RX buffers. */
2343 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2344 &sc->bge_cdata.bge_rx_std_sparemap);
2345 if (error) {
2346 device_printf(sc->bge_dev,
2347 "can't create spare DMA map for RX\n");
2348 return (ENOMEM);
2349 }
2350 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2351 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2352 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2353 if (error) {
2354 device_printf(sc->bge_dev,
2355 "can't create DMA map for RX\n");
2356 return (ENOMEM);
2357 }
2358 }
2359
2360 /* Create DMA maps for TX buffers. */
2361 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2362 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2363 &sc->bge_cdata.bge_tx_dmamap[i]);
2364 if (error) {
2365 device_printf(sc->bge_dev,
2366 "can't create DMA map for TX\n");
2367 return (ENOMEM);
2368 }
2369 }
2370
2371 /* Create tags for jumbo RX buffers. */
2372 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2373 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2374 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2375 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2376 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2377 if (error) {
2378 device_printf(sc->bge_dev,
2379 "could not allocate jumbo dma tag\n");
2380 return (ENOMEM);
2381 }
2382 /* Create DMA maps for jumbo RX buffers. */
2383 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2384 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2385 if (error) {
2386 device_printf(sc->bge_dev,
2387 "can't create spare DMA map for jumbo RX\n");
2388 return (ENOMEM);
2389 }
2390 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2391 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2392 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2393 if (error) {
2394 device_printf(sc->bge_dev,
2395 "can't create DMA map for jumbo RX\n");
2396 return (ENOMEM);
2397 }
2398 }
2399 }
2400
2401 return (0);
2402}
2403
2404/*
2405 * Return true if this device has more than one port.
2406 */
2407static int
2408bge_has_multiple_ports(struct bge_softc *sc)
2409{
2410 device_t dev = sc->bge_dev;
2411 u_int b, d, f, fscan, s;
2412
2413 d = pci_get_domain(dev);
2414 b = pci_get_bus(dev);
2415 s = pci_get_slot(dev);
2416 f = pci_get_function(dev);
2417 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2418 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2419 return (1);
2420 return (0);
2421}
2422
2423/*
2424 * Return true if MSI can be used with this device.
2425 */
2426static int
2427bge_can_use_msi(struct bge_softc *sc)
2428{
2429 int can_use_msi = 0;
2430
2431 switch (sc->bge_asicrev) {
2432 case BGE_ASICREV_BCM5714_A0:
2433 case BGE_ASICREV_BCM5714:
2434 /*
2435 * Apparently, MSI doesn't work when these chips are
2436 * configured in single-port mode.
2437 */
2438 if (bge_has_multiple_ports(sc))
2439 can_use_msi = 1;
2440 break;
2441 case BGE_ASICREV_BCM5750:
2442 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2443 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2444 can_use_msi = 1;
2445 break;
2446 default:
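		/* Assume MSI works on all other 575X and later controllers. */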
2447 if (BGE_IS_575X_PLUS(sc))
2448 can_use_msi = 1;
2449 }
2450 return (can_use_msi);
2451}
2452
2453static int
2454bge_attach(device_t dev)
2455{
2456 struct ifnet *ifp;
2457 struct bge_softc *sc;
2458 uint32_t hwcfg = 0, misccfg;
2459 u_char eaddr[ETHER_ADDR_LEN];
2460 int error, msicount, reg, rid, trys;
2461
2462 sc = device_get_softc(dev);
2463 sc->bge_dev = dev;
2464
2465 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2466
2467 /*
2468 * Map control/status registers.
2469 */
2470 pci_enable_busmaster(dev);
2471
2472 rid = PCIR_BAR(0);
2473 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2474 RF_ACTIVE);
2475
2476 if (sc->bge_res == NULL) {
2477 device_printf (sc->bge_dev, "couldn't map memory\n");
2478 error = ENXIO;
2479 goto fail;
2480 }
2481
2482 /* Save various chip information. */
2483 sc->bge_chipid =
2484 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2485 BGE_PCIMISCCTL_ASICREV_SHIFT;
2486 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG)
2487 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV,
2488 4);
2489 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2490 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2491
2492 /*
2493 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2494 * 5705 A0 and A1 chips.
2495 */
2496 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2497 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2498 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2499 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
2500 sc->bge_flags |= BGE_FLAG_WIRESPEED;
2501
2502 if (bge_has_eaddr(sc))
2503 sc->bge_flags |= BGE_FLAG_EADDR;
2504
2505 /* Save chipset family. */
2506 switch (sc->bge_asicrev) {
2507 case BGE_ASICREV_BCM5755:
2508 case BGE_ASICREV_BCM5761:
2509 case BGE_ASICREV_BCM5784:
2510 case BGE_ASICREV_BCM5785:
2511 case BGE_ASICREV_BCM5787:
2512 case BGE_ASICREV_BCM57780:
2513 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2514 BGE_FLAG_5705_PLUS;
2515 break;
2516 case BGE_ASICREV_BCM5700:
2517 case BGE_ASICREV_BCM5701:
2518 case BGE_ASICREV_BCM5703:
2519 case BGE_ASICREV_BCM5704:
2520 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2521 break;
2522 case BGE_ASICREV_BCM5714_A0:
2523 case BGE_ASICREV_BCM5780:
2524 case BGE_ASICREV_BCM5714:
2525 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2526 /* FALLTHROUGH */
2527 case BGE_ASICREV_BCM5750:
2528 case BGE_ASICREV_BCM5752:
2529 case BGE_ASICREV_BCM5906:
2530 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2531 /* FALLTHROUGH */
2532 case BGE_ASICREV_BCM5705:
2533 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2534 break;
2535 }
2536
2537 /* Set various bug flags. */
2538 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2539 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2540 sc->bge_flags |= BGE_FLAG_CRC_BUG;
2541 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2542 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2543 sc->bge_flags |= BGE_FLAG_ADC_BUG;
2544 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2545 sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
2546 if (pci_get_subvendor(dev) == DELL_VENDORID)
2547 sc->bge_flags |= BGE_FLAG_NO_3LED;
2548 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2549 sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
2550 if (BGE_IS_5705_PLUS(sc) &&
2551 !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
2552 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2553 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2554 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2555 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2556 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2557 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2558 sc->bge_flags |= BGE_FLAG_JITTER_BUG;
2559 } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
2560 sc->bge_flags |= BGE_FLAG_BER_BUG;
2561 }
2562
2563 /*
2564	 * All controllers that are not 5755 or higher have the 4GB
2565	 * boundary DMA bug.
2566	 * Whenever an address crosses a multiple of the 4GB boundary
2567	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2568	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2569	 * state machine will lock up and cause the device to hang.
2570 */
2571 if (BGE_IS_5755_PLUS(sc) == 0)
2572 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2573
2574 /*
2575 * We could possibly check for BCOM_DEVICEID_BCM5788 in bge_probe()
2576 * but I do not know the DEVICEID for the 5788M.
2577 */
2578 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2579 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2580 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2581 sc->bge_flags |= BGE_FLAG_5788;
2582
2583 /*
2584	 * Some controllers seem to require special firmware to use
2585	 * TSO. That firmware is not available to FreeBSD, and Linux
2586	 * claims that TSO performed by the firmware is slower than
2587	 * hardware-based TSO. Moreover, the firmware-based TSO has a
2588	 * known bug: it cannot handle TSO if the Ethernet header plus
2589	 * the IP/TCP header is greater than 80 bytes. A workaround for
2590	 * this bug exists, but it seems more expensive than simply not
2591	 * using TSO. Some hardware also has the TSO bug, so limit TSO
2592	 * to the controllers that are not affected by TSO issues
2593	 * (e.g. 5755 or higher).
2594 */
2595 if (BGE_IS_5755_PLUS(sc)) {
2596 /*
2597		 * BCM5754 and BCM5787 share the same ASIC ID, so an
2598		 * explicit device ID check is required.
2599		 * For unknown reasons, TSO does not work on the BCM5755M.
2600 */
2601 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2602 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2603 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
2604 sc->bge_flags |= BGE_FLAG_TSO;
2605 }
2606
2607 /*
2608 * Check if this is a PCI-X or PCI Express device.
2609 */
2610 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2611 /*
2612 * Found a PCI Express capabilities register, this
2613 * must be a PCI Express device.
2614 */
2615 sc->bge_flags |= BGE_FLAG_PCIE;
2616 sc->bge_expcap = reg;
2617 if (pci_get_max_read_req(dev) != 4096)
2618 pci_set_max_read_req(dev, 4096);
2619 } else {
2620 /*
2621 * Check if the device is in PCI-X Mode.
2622 * (This bit is not valid on PCI Express controllers.)
2623 */
2624 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
2625 sc->bge_pcixcap = reg;
2626 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2627 BGE_PCISTATE_PCI_BUSMODE) == 0)
2628 sc->bge_flags |= BGE_FLAG_PCIX;
2629 }
2630
2631 /*
2632 * The 40bit DMA bug applies to the 5714/5715 controllers and is
2633 * not actually a MAC controller bug but an issue with the embedded
2634 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
2635 */
2636 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2637 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2638 /*
2639 * Allocate the interrupt, using MSI if possible. These devices
2640 * support 8 MSI messages, but only the first one is used in
2641 * normal operation.
2642 */
2643 rid = 0;
2644 if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
2645 sc->bge_msicap = reg;
2646 if (bge_can_use_msi(sc)) {
2647 msicount = pci_msi_count(dev);
2648 if (msicount > 1)
2649 msicount = 1;
2650 } else
2651 msicount = 0;
2652 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2653 rid = 1;
2654 sc->bge_flags |= BGE_FLAG_MSI;
2655 }
2656 }
2657
2658 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2659 RF_SHAREABLE | RF_ACTIVE);
2660
2661 if (sc->bge_irq == NULL) {
2662 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2663 error = ENXIO;
2664 goto fail;
2665 }
2666
2667 device_printf(dev,
2668 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2669 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2670 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2671 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2672
2673 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2674
2675 /* Try to reset the chip. */
2676 if (bge_reset(sc)) {
2677 device_printf(sc->bge_dev, "chip reset failed\n");
2678 error = ENXIO;
2679 goto fail;
2680 }
2681
2682 sc->bge_asf_mode = 0;
2683 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2684 == BGE_MAGIC_NUMBER)) {
2685 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2686 & BGE_HWCFG_ASF) {
2687 sc->bge_asf_mode |= ASF_ENABLE;
2688 sc->bge_asf_mode |= ASF_STACKUP;
2689 if (BGE_IS_575X_PLUS(sc))
2690 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2691 }
2692 }
2693
2694 /* Try to reset the chip again the nice way. */
2695 bge_stop_fw(sc);
2696 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2697 if (bge_reset(sc)) {
2698 device_printf(sc->bge_dev, "chip reset failed\n");
2699 error = ENXIO;
2700 goto fail;
2701 }
2702
2703 bge_sig_legacy(sc, BGE_RESET_STOP);
2704 bge_sig_post_reset(sc, BGE_RESET_STOP);
2705
2706 if (bge_chipinit(sc)) {
2707 device_printf(sc->bge_dev, "chip initialization failed\n");
2708 error = ENXIO;
2709 goto fail;
2710 }
2711
2712 error = bge_get_eaddr(sc, eaddr);
2713 if (error) {
2714 device_printf(sc->bge_dev,
2715 "failed to read station address\n");
2716 error = ENXIO;
2717 goto fail;
2718 }
2719
2720 /* 5705 limits RX return ring to 512 entries. */
2721 if (BGE_IS_5705_PLUS(sc))
2722 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2723 else
2724 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2725
2726 if (bge_dma_alloc(sc)) {
2727 device_printf(sc->bge_dev,
2728 "failed to allocate DMA resources\n");
2729 error = ENXIO;
2730 goto fail;
2731 }
2732
2733 bge_add_sysctls(sc);
2734
2735 /* Set default tuneable values. */
2736 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2737 sc->bge_rx_coal_ticks = 150;
2738 sc->bge_tx_coal_ticks = 150;
2739 sc->bge_rx_max_coal_bds = 10;
2740 sc->bge_tx_max_coal_bds = 10;
2741
2742 /* Initialize checksum features to use. */
2743 sc->bge_csum_features = BGE_CSUM_FEATURES;
2744 if (sc->bge_forced_udpcsum != 0)
2745 sc->bge_csum_features |= CSUM_UDP;
2746
2747 /* Set up ifnet structure */
2748 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2749 if (ifp == NULL) {
2750 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2751 error = ENXIO;
2752 goto fail;
2753 }
2754 ifp->if_softc = sc;
2755 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2756 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2757 ifp->if_ioctl = bge_ioctl;
2758 ifp->if_start = bge_start;
2759 ifp->if_init = bge_init;
2760 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2761 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2762 IFQ_SET_READY(&ifp->if_snd);
2763 ifp->if_hwassist = sc->bge_csum_features;
2764 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2765 IFCAP_VLAN_MTU;
2766 if ((sc->bge_flags & BGE_FLAG_TSO) != 0) {
2767 ifp->if_hwassist |= CSUM_TSO;
2768 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
2769 }
2770#ifdef IFCAP_VLAN_HWCSUM
2771 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2772#endif
2773 ifp->if_capenable = ifp->if_capabilities;
2774#ifdef DEVICE_POLLING
2775 ifp->if_capabilities |= IFCAP_POLLING;
2776#endif
2777
2778 /*
2779 * 5700 B0 chips do not support checksumming correctly due
2780 * to hardware bugs.
2781 */
2782 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2783 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2784 ifp->if_capenable &= ~IFCAP_HWCSUM;
2785 ifp->if_hwassist = 0;
2786 }
2787
2788 /*
2789 * Figure out what sort of media we have by checking the
2790 * hardware config word in the first 32k of NIC internal memory,
2791 * or fall back to examining the EEPROM if necessary.
2792 * Note: on some BCM5700 cards, this value appears to be unset.
2793 * If that's the case, we have to rely on identifying the NIC
2794 * by its PCI subsystem ID, as we do below for the SysKonnect
2795 * SK-9D41.
2796 */
2797 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2798 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2799 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
2800 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2801 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2802 sizeof(hwcfg))) {
2803 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2804 error = ENXIO;
2805 goto fail;
2806 }
2807 hwcfg = ntohl(hwcfg);
2808 }
2809
2810 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2811 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
2812 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2813 if (BGE_IS_5714_FAMILY(sc))
2814 sc->bge_flags |= BGE_FLAG_MII_SERDES;
2815 else
2816 sc->bge_flags |= BGE_FLAG_TBI;
2817 }
2818
2819 if (sc->bge_flags & BGE_FLAG_TBI) {
2820 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2821 bge_ifmedia_sts);
2822 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
2823 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2824 0, NULL);
2825 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
2826 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2827 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2828 } else {
2829 /*
2830 * Do transceiver setup and tell the firmware the
2831		 * driver is down so we can try to get access for the
2832		 * PHY probe if ASF is running. Retry a couple of times
2833 * if we get a conflict with the ASF firmware accessing
2834 * the PHY.
2835 */
2836 trys = 0;
2837 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2838again:
2839 bge_asf_driver_up(sc);
2840
2841 if (mii_phy_probe(dev, &sc->bge_miibus,
2842 bge_ifmedia_upd, bge_ifmedia_sts)) {
2843 if (trys++ < 4) {
2844 device_printf(sc->bge_dev, "Try again\n");
2845 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
2846 BMCR_RESET);
2847 goto again;
2848 }
2849
2850 device_printf(sc->bge_dev, "MII without any PHY!\n");
2851 error = ENXIO;
2852 goto fail;
2853 }
2854
2855 /*
2856 * Now tell the firmware we are going up after probing the PHY
2857 */
2858 if (sc->bge_asf_mode & ASF_STACKUP)
2859 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2860 }
2861
2862 /*
2863 * When using the BCM5701 in PCI-X mode, data corruption has
2864 * been observed in the first few bytes of some received packets.
2865 * Aligning the packet buffer in memory eliminates the corruption.
2866 * Unfortunately, this misaligns the packet payloads. On platforms
2867 * which do not support unaligned accesses, we will realign the
2868 * payloads by copying the received packets.
2869 */
2870 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2871 sc->bge_flags & BGE_FLAG_PCIX)
2872 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2873
2874 /*
2875 * Call MI attach routine.
2876 */
2877 ether_ifattach(ifp, eaddr);
2878 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2879
2880 /* Tell upper layer we support long frames. */
2881 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2882
2883 /*
2884 * Hookup IRQ last.
2885 */
2886#if __FreeBSD_version > 700030
2887 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
2888 /* Take advantage of single-shot MSI. */
2889 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
2890 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
2891 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
2892 taskqueue_thread_enqueue, &sc->bge_tq);
2893 if (sc->bge_tq == NULL) {
2894 device_printf(dev, "could not create taskqueue.\n");
2895 ether_ifdetach(ifp);
2896 error = ENXIO;
2897 goto fail;
2898 }
2899 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
2900 device_get_nameunit(sc->bge_dev));
2901 error = bus_setup_intr(dev, sc->bge_irq,
2902 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
2903 &sc->bge_intrhand);
2904 if (error)
2905 ether_ifdetach(ifp);
2906 } else
2907 error = bus_setup_intr(dev, sc->bge_irq,
2908 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
2909 &sc->bge_intrhand);
2910#else
2911 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2912 bge_intr, sc, &sc->bge_intrhand);
2913#endif
2914
2915 if (error) {
2916 bge_detach(dev);
2917 device_printf(sc->bge_dev, "couldn't set up irq\n");
2918 }
2919
2920 return (0);
2921
2922fail:
2923 bge_release_resources(sc);
2924
2925 return (error);
2926}
2927
2928static int
2929bge_detach(device_t dev)
2930{
2931 struct bge_softc *sc;
2932 struct ifnet *ifp;
2933
2934 sc = device_get_softc(dev);
2935 ifp = sc->bge_ifp;
2936
2937#ifdef DEVICE_POLLING
2938 if (ifp->if_capenable & IFCAP_POLLING)
2939 ether_poll_deregister(ifp);
2940#endif
2941
2942 BGE_LOCK(sc);
2943 bge_stop(sc);
2944 bge_reset(sc);
2945 BGE_UNLOCK(sc);
2946
2947 callout_drain(&sc->bge_stat_ch);
2948
2949 if (sc->bge_tq)
2950 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
2951 ether_ifdetach(ifp);
2952
2953 if (sc->bge_flags & BGE_FLAG_TBI) {
2954 ifmedia_removeall(&sc->bge_ifmedia);
2955 } else {
2956 bus_generic_detach(dev);
2957 device_delete_child(dev, sc->bge_miibus);
2958 }
2959
2960 bge_release_resources(sc);
2961
2962 return (0);
2963}
2964
2965static void
2966bge_release_resources(struct bge_softc *sc)
2967{
2968 device_t dev;
2969
2970 dev = sc->bge_dev;
2971
2972 if (sc->bge_tq != NULL)
2973 taskqueue_free(sc->bge_tq);
2974
2975 if (sc->bge_intrhand != NULL)
2976 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2977
2978 if (sc->bge_irq != NULL)
2979 bus_release_resource(dev, SYS_RES_IRQ,
2980 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
2981
2982 if (sc->bge_flags & BGE_FLAG_MSI)
2983 pci_release_msi(dev);
2984
2985 if (sc->bge_res != NULL)
2986 bus_release_resource(dev, SYS_RES_MEMORY,
2987 PCIR_BAR(0), sc->bge_res);
2988
2989 if (sc->bge_ifp != NULL)
2990 if_free(sc->bge_ifp);
2991
2992 bge_dma_free(sc);
2993
2994 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
2995 BGE_LOCK_DESTROY(sc);
2996}
2997
2998static int
2999bge_reset(struct bge_softc *sc)
3000{
3001 device_t dev;
3002 uint32_t cachesize, command, pcistate, reset, val;
3003 void (*write_op)(struct bge_softc *, int, int);
3004 uint16_t devctl;
3005 int i;
3006
3007 dev = sc->bge_dev;
3008
3009 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3010 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3011 if (sc->bge_flags & BGE_FLAG_PCIE)
3012 write_op = bge_writemem_direct;
3013 else
3014 write_op = bge_writemem_ind;
3015 } else
3016 write_op = bge_writereg_ind;
3017
3018 /* Save some important PCI state. */
3019 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3020 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3021 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3022
3023 pci_write_config(dev, BGE_PCI_MISC_CTL,
3024 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3025 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3026
3027 /* Disable fastboot on controllers that support it. */
3028 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3029 BGE_IS_5755_PLUS(sc)) {
3030 if (bootverbose)
3031 device_printf(dev, "Disabling fastboot\n");
3032 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3033 }
3034
3035 /*
3036 * Write the magic number to SRAM at offset 0xB50.
3037	 * When the firmware finishes its initialization, it will
3038 * write ~BGE_MAGIC_NUMBER to the same location.
3039 */
3040 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3041
3042 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3043
3044 /* XXX: Broadcom Linux driver. */
3045 if (sc->bge_flags & BGE_FLAG_PCIE) {
3046 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3047 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3048 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3049 /* Prevent PCIE link training during global reset */
3050 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3051 reset |= 1 << 29;
3052 }
3053 }
3054
3055 /*
3056 * Set GPHY Power Down Override to leave GPHY
3057 * powered up in D0 uninitialized.
3058 */
3059 if (BGE_IS_5705_PLUS(sc))
3060 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3061
3062 /* Issue global reset */
3063 write_op(sc, BGE_MISC_CFG, reset);
3064
3065 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3066 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3067 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3068 val | BGE_VCPU_STATUS_DRV_RESET);
3069 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3070 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3071 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3072 }
3073
3074 DELAY(1000);
3075
3076 /* XXX: Broadcom Linux driver. */
3077 if (sc->bge_flags & BGE_FLAG_PCIE) {
3078 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3079 DELAY(500000); /* wait for link training to complete */
3080 val = pci_read_config(dev, 0xC4, 4);
3081 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3082 }
3083 devctl = pci_read_config(dev,
3084 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3085 /* Clear enable no snoop and disable relaxed ordering. */
3086 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3087 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3088 /* Set PCIE max payload size to 128. */
3089 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3090 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3091 devctl, 2);
3092 /* Clear error status. */
3093 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3094 PCIM_EXP_STA_CORRECTABLE_ERROR |
3095 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3096 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3097 }
3098
3099 /* Reset some of the PCI state that got zapped by reset. */
3100 pci_write_config(dev, BGE_PCI_MISC_CTL,
3101 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3102 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3103 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3104 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3105 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3106 /*
3107	 * Disable PCI-X relaxed ordering to ensure the status block
3108	 * update arrives before packet buffer DMA. Otherwise the
3109	 * driver may read a stale status block.
3110 */
3111 if (sc->bge_flags & BGE_FLAG_PCIX) {
3112 devctl = pci_read_config(dev,
3113 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3114 devctl &= ~PCIXM_COMMAND_ERO;
3115 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3116 devctl &= ~PCIXM_COMMAND_MAX_READ;
3117 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3118 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3119 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3120 PCIXM_COMMAND_MAX_READ);
3121 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3122 }
3123 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3124 devctl, 2);
3125 }
3126	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
3127 if (BGE_IS_5714_FAMILY(sc)) {
3128 /* This chip disables MSI on reset. */
3129 if (sc->bge_flags & BGE_FLAG_MSI) {
3130 val = pci_read_config(dev,
3131 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3132 pci_write_config(dev,
3133 sc->bge_msicap + PCIR_MSI_CTRL,
3134 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3135 val = CSR_READ_4(sc, BGE_MSI_MODE);
3136 CSR_WRITE_4(sc, BGE_MSI_MODE,
3137 val | BGE_MSIMODE_ENABLE);
3138 }
3139 val = CSR_READ_4(sc, BGE_MARB_MODE);
3140 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3141 } else
3142 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3143
3144 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3145 for (i = 0; i < BGE_TIMEOUT; i++) {
3146 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3147 if (val & BGE_VCPU_STATUS_INIT_DONE)
3148 break;
3149 DELAY(100);
3150 }
3151 if (i == BGE_TIMEOUT) {
3152 device_printf(dev, "reset timed out\n");
3153 return (1);
3154 }
3155 } else {
3156 /*
3157 * Poll until we see the 1's complement of the magic number.
3158 * This indicates that the firmware initialization is complete.
3159 * We expect this to fail if no chip containing the Ethernet
3160		 * address is fitted, though.
3161 */
3162 for (i = 0; i < BGE_TIMEOUT; i++) {
3163 DELAY(10);
3164 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3165 if (val == ~BGE_MAGIC_NUMBER)
3166 break;
3167 }
3168
3169 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3170 device_printf(dev,
3171 "firmware handshake timed out, found 0x%08x\n",
3172 val);
3173 }
3174
3175 /*
3176 * XXX Wait for the value of the PCISTATE register to
3177 * return to its original pre-reset state. This is a
3178 * fairly good indicator of reset completion. If we don't
3179 * wait for the reset to fully complete, trying to read
3180 * from the device's non-PCI registers may yield garbage
3181 * results.
3182 */
3183 for (i = 0; i < BGE_TIMEOUT; i++) {
3184 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3185 break;
3186 DELAY(10);
3187 }
3188
3189 /* Fix up byte swapping. */
3190 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3191 BGE_MODECTL_BYTESWAP_DATA);
3192
3193 /* Tell the ASF firmware we are up */
3194 if (sc->bge_asf_mode & ASF_STACKUP)
3195 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3196
3197 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3198
3199 /*
3200 * The 5704 in TBI mode apparently needs some special
3201	 * adjustment to ensure the SERDES drive level is set
3202 * to 1.2V.
3203 */
3204 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3205 sc->bge_flags & BGE_FLAG_TBI) {
3206 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3207 val = (val & ~0xFFF) | 0x880;
3208 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3209 }
3210
3211 /* XXX: Broadcom Linux driver. */
3212 if (sc->bge_flags & BGE_FLAG_PCIE &&
3213 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3214 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3215 /* Enable Data FIFO protection. */
3216 val = CSR_READ_4(sc, 0x7C00);
3217 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3218 }
3219 DELAY(10000);
3220
3221 return (0);
3222}
3223
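/*
 * Re-queue a standard-ring receive buffer: rebuild a ring descriptor at
 * the current producer slot from the cached segment length so the same
 * mbuf can be handed back to the hardware after an RX error or an mbuf
 * allocation failure.  bge_rxreuse_jumbo() below does the same for the
 * jumbo ring.
 */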
3224static __inline void
3225bge_rxreuse_std(struct bge_softc *sc, int i)
3226{
3227 struct bge_rx_bd *r;
3228
3229 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3230 r->bge_flags = BGE_RXBDFLAG_END;
3231 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3232 r->bge_idx = i;
3233 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3234}
3235
3236static __inline void
3237bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3238{
3239 struct bge_extrx_bd *r;
3240
3241 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3242 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3243 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3244 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3245 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3246 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3247 r->bge_idx = i;
3248 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3249}
3250
3251/*
3252 * Frame reception handling. This is called if there's a frame
3253 * on the receive return list.
3254 *
3255 * Note: we have to be able to handle two possibilities here:
3256 * 1) the frame is from the jumbo receive ring
3257 * 2) the frame is from the standard receive ring
3258 */
3259
3260static int
3261bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3262{
3263 struct ifnet *ifp;
3264 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3265 uint16_t rx_cons;
3266
3267 rx_cons = sc->bge_rx_saved_considx;
3268
3269 /* Nothing to do. */
3270 if (rx_cons == rx_prod)
3271 return (rx_npkts);
3272
3273 ifp = sc->bge_ifp;
3274
3275 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3276 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3277 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3278 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
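	/*
	 * The jumbo ring is only in use (and only needs to be synced) when
	 * the configured MTU does not fit in a standard mbuf cluster.
	 */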
3279 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3280 (MCLBYTES - ETHER_ALIGN))
3281 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3282 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3283
3284 while (rx_cons != rx_prod) {
3285 struct bge_rx_bd *cur_rx;
3286 uint32_t rxidx;
3287 struct mbuf *m = NULL;
3288 uint16_t vlan_tag = 0;
3289 int have_tag = 0;
3290
3291#ifdef DEVICE_POLLING
3292 if (ifp->if_capenable & IFCAP_POLLING) {
3293 if (sc->rxcycles <= 0)
3294 break;
3295 sc->rxcycles--;
3296 }
3297#endif
3298
3299 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3300
3301 rxidx = cur_rx->bge_idx;
3302 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3303
3304 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3305 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3306 have_tag = 1;
3307 vlan_tag = cur_rx->bge_vlan_tag;
3308 }
3309
3310 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3311 jumbocnt++;
3312 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3313 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3314 bge_rxreuse_jumbo(sc, rxidx);
3315 continue;
3316 }
3317 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3318 bge_rxreuse_jumbo(sc, rxidx);
3319 ifp->if_iqdrops++;
3320 continue;
3321 }
3322 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3323 } else {
3324 stdcnt++;
3325 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3326 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3327 bge_rxreuse_std(sc, rxidx);
3328 continue;
3329 }
3330 if (bge_newbuf_std(sc, rxidx) != 0) {
3331 bge_rxreuse_std(sc, rxidx);
3332 ifp->if_iqdrops++;
3333 continue;
3334 }
3335 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3336 }
3337
3338 ifp->if_ipackets++;
3339#ifndef __NO_STRICT_ALIGNMENT
3340 /*
3341 * For architectures with strict alignment we must make sure
3342 * the payload is aligned.
3343 */
3344 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3345 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3346 cur_rx->bge_len);
3347 m->m_data += ETHER_ALIGN;
3348 }
3349#endif
3350 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3351 m->m_pkthdr.rcvif = ifp;
3352
3353 if (ifp->if_capenable & IFCAP_RXCSUM) {
3354 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3355 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3356 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3357 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3358 }
3359 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3360 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3361 m->m_pkthdr.csum_data =
3362 cur_rx->bge_tcp_udp_csum;
3363 m->m_pkthdr.csum_flags |=
3364 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
3365 }
3366 }
3367
3368 /*
3369 * If we received a packet with a vlan tag,
3370 * attach that information to the packet.
3371 */
3372 if (have_tag) {
3373#if __FreeBSD_version > 700022
3374 m->m_pkthdr.ether_vtag = vlan_tag;
3375 m->m_flags |= M_VLANTAG;
3376#else
3377 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3378 if (m == NULL)
3379 continue;
3380#endif
3381 }
3382
3383 if (holdlck != 0) {
3384 BGE_UNLOCK(sc);
3385 (*ifp->if_input)(ifp, m);
3386 BGE_LOCK(sc);
3387 } else
3388 (*ifp->if_input)(ifp, m);
3389 rx_npkts++;
3390
3391 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3392 return (rx_npkts);
3393 }
3394
3395 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3396 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3397 if (stdcnt > 0)
3398 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3399 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3400
3401 if (jumbocnt > 0)
3402 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3403 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3404
3405 sc->bge_rx_saved_considx = rx_cons;
3406 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3407 if (stdcnt)
3408 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3409 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3410 if (jumbocnt)
3411 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3412 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3413#ifdef notyet
3414 /*
3415 * This register wraps very quickly under heavy packet drops.
3416 * If you need correct statistics, you can enable this check.
3417 */
3418 if (BGE_IS_5705_PLUS(sc))
3419 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3420#endif
3421 return (rx_npkts);
3422}
3423
3424static void
3425bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3426{
3427 struct bge_tx_bd *cur_tx;
3428 struct ifnet *ifp;
3429
3430 BGE_LOCK_ASSERT(sc);
3431
3432 /* Nothing to do. */
3433 if (sc->bge_tx_saved_considx == tx_cons)
3434 return;
3435
3436 ifp = sc->bge_ifp;
3437
3438 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3439 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3440 /*
3441 * Go through our tx ring and free mbufs for those
3442 * frames that have been sent.
3443 */
3444 while (sc->bge_tx_saved_considx != tx_cons) {
3445 uint32_t idx;
3446
3447 idx = sc->bge_tx_saved_considx;
3448 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3449 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3450 ifp->if_opackets++;
3451 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3452 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3453 sc->bge_cdata.bge_tx_dmamap[idx],
3454 BUS_DMASYNC_POSTWRITE);
3455 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3456 sc->bge_cdata.bge_tx_dmamap[idx]);
3457 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3458 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3459 }
3460 sc->bge_txcnt--;
3461 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3462 }
3463
3464 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3465 if (sc->bge_txcnt == 0)
3466 sc->bge_timer = 0;
3467}
3468
3469#ifdef DEVICE_POLLING
3470static int
3471bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3472{
3473 struct bge_softc *sc = ifp->if_softc;
3474 uint16_t rx_prod, tx_cons;
3475 uint32_t statusword;
3476 int rx_npkts = 0;
3477
3478 BGE_LOCK(sc);
3479 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3480 BGE_UNLOCK(sc);
3481 return (rx_npkts);
3482 }
3483
3484 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3485 sc->bge_cdata.bge_status_map,
3486 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3487 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3488 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3489
3490 statusword = sc->bge_ldata.bge_status_block->bge_status;
3491 sc->bge_ldata.bge_status_block->bge_status = 0;
3492
3493 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3494 sc->bge_cdata.bge_status_map,
3495 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3496
3497 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3498 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3499 sc->bge_link_evt++;
3500
3501 if (cmd == POLL_AND_CHECK_STATUS)
3502 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3503 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3504 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3505 bge_link_upd(sc);
3506
3507 sc->rxcycles = count;
3508 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3509 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3510 BGE_UNLOCK(sc);
3511 return (rx_npkts);
3512 }
3513 bge_txeof(sc, tx_cons);
3514 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3515 bge_start_locked(ifp);
3516
3517 BGE_UNLOCK(sc);
3518 return (rx_npkts);
3519}
3520#endif /* DEVICE_POLLING */
3521
3522static int
3523bge_msi_intr(void *arg)
3524{
3525 struct bge_softc *sc;
3526
3527 sc = (struct bge_softc *)arg;
3528 /*
3529	 * This interrupt is not shared, and the controller has already
3530	 * disabled further interrupts.
3531 */
3532 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3533 return (FILTER_HANDLED);
3534}
3535
3536static void
3537bge_intr_task(void *arg, int pending)
3538{
3539 struct bge_softc *sc;
3540 struct ifnet *ifp;
3541 uint32_t status;
3542 uint16_t rx_prod, tx_cons;
3543
3544 sc = (struct bge_softc *)arg;
3545 ifp = sc->bge_ifp;
3546
3547 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
3548 return;
3549
3550 /* Get updated status block. */
3551 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3552 sc->bge_cdata.bge_status_map,
3553 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3554
3555	/* Save the producer/consumer indexes. */
3556 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3557 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3558 status = sc->bge_ldata.bge_status_block->bge_status;
3559 sc->bge_ldata.bge_status_block->bge_status = 0;
3560 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3561 sc->bge_cdata.bge_status_map,
3562 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3563 /* Let controller work. */
3564 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3565
3566 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0) {
3567 BGE_LOCK(sc);
3568 bge_link_upd(sc);
3569 BGE_UNLOCK(sc);
3570 }
3571 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3572 /* Check RX return ring producer/consumer. */
3573 bge_rxeof(sc, rx_prod, 0);
3574 }
3575 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3576 BGE_LOCK(sc);
3577 /* Check TX ring producer/consumer. */
3578 bge_txeof(sc, tx_cons);
3579 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3580 bge_start_locked(ifp);
3581 BGE_UNLOCK(sc);
3582 }
3583}
3584
3585static void
3586bge_intr(void *xsc)
3587{
3588 struct bge_softc *sc;
3589 struct ifnet *ifp;
3590 uint32_t statusword;
3591 uint16_t rx_prod, tx_cons;
3592
3593 sc = xsc;
3594
3595 BGE_LOCK(sc);
3596
3597 ifp = sc->bge_ifp;
3598
3599#ifdef DEVICE_POLLING
3600 if (ifp->if_capenable & IFCAP_POLLING) {
3601 BGE_UNLOCK(sc);
3602 return;
3603 }
3604#endif
3605
3606 /*
3607 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3608 * disable interrupts by writing nonzero like we used to, since with
3609 * our current organization this just gives complications and
3610 * pessimizations for re-enabling interrupts. We used to have races
3611 * instead of the necessary complications. Disabling interrupts
3612 * would just reduce the chance of a status update while we are
3613 * running (by switching to the interrupt-mode coalescence
3614 * parameters), but this chance is already very low so it is more
3615 * efficient to get another interrupt than prevent it.
3616 *
3617 * We do the ack first to ensure another interrupt if there is a
3618 * status update after the ack. We don't check for the status
3619 * changing later because it is more efficient to get another
3620 * interrupt than prevent it, not quite as above (not checking is
3621 * a smaller optimization than not toggling the interrupt enable,
3622	 * since checking doesn't involve PCI accesses and toggling requires
3623 * the status check). So toggling would probably be a pessimization
3624 * even with MSI. It would only be needed for using a task queue.
3625 */
3626 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3627
3628 /*
3629 * Do the mandatory PCI flush as well as get the link status.
3630 */
3631 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3632
3633 /* Make sure the descriptor ring indexes are coherent. */
3634 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3635 sc->bge_cdata.bge_status_map,
3636 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3637 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3638 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3639 sc->bge_ldata.bge_status_block->bge_status = 0;
3640 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3641 sc->bge_cdata.bge_status_map,
3642 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3643
3644 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3645 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3646 statusword || sc->bge_link_evt)
3647 bge_link_upd(sc);
3648
3649 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3650 /* Check RX return ring producer/consumer. */
3651 bge_rxeof(sc, rx_prod, 1);
3652 }
3653
3654 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3655 /* Check TX ring producer/consumer. */
3656 bge_txeof(sc, tx_cons);
3657 }
3658
3659 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3660 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3661 bge_start_locked(ifp);
3662
3663 BGE_UNLOCK(sc);
3664}
3665
3666static void
3667bge_asf_driver_up(struct bge_softc *sc)
3668{
3669 if (sc->bge_asf_mode & ASF_STACKUP) {
3670		/* Send an ASF heartbeat approx. every 2s. */
3671		if (sc->bge_asf_count)
3672			sc->bge_asf_count--;
3673 else {
3674 sc->bge_asf_count = 2;
3675 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3676 BGE_FW_DRV_ALIVE);
3677 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3678 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3679 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3680 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3681 }
3682 }
3683}
3684
3685static void
3686bge_tick(void *xsc)
3687{
3688 struct bge_softc *sc = xsc;
3689 struct mii_data *mii = NULL;
3690
3691 BGE_LOCK_ASSERT(sc);
3692
3693 /* Synchronize with possible callout reset/stop. */
3694 if (callout_pending(&sc->bge_stat_ch) ||
3695 !callout_active(&sc->bge_stat_ch))
3696 return;
3697
3698 if (BGE_IS_5705_PLUS(sc))
3699 bge_stats_update_regs(sc);
3700 else
3701 bge_stats_update(sc);
3702
3703 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3704 mii = device_get_softc(sc->bge_miibus);
3705 /*
3706		 * Do not touch the PHY if we have link up. This could break
3707		 * IPMI/ASF mode or produce extra input errors
3708		 * (extra errors were reported for bcm5701 & bcm5704).
3709 */
3710 if (!sc->bge_link)
3711 mii_tick(mii);
3712 } else {
3713 /*
3714		 * Since auto-polling can't be used in TBI mode, we have to poll
3715		 * the link status manually. Here we register a pending link
3716		 * event and trigger an interrupt.
3717 */
3718#ifdef DEVICE_POLLING
3719 /* In polling mode we poll link state in bge_poll(). */
3720 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3721#endif
3722 {
3723 sc->bge_link_evt++;
3724 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3725 sc->bge_flags & BGE_FLAG_5788)
3726 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3727 else
3728 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3729 }
3730 }
3731
3732 bge_asf_driver_up(sc);
3733 bge_watchdog(sc);
3734
3735 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3736}
3737
3738static void
3739bge_stats_update_regs(struct bge_softc *sc)
3740{
3741 struct ifnet *ifp;
3742 struct bge_mac_stats *stats;
3743
3744 ifp = sc->bge_ifp;
3745 stats = &sc->bge_mac_stats;
3746
3747 stats->ifHCOutOctets +=
3748 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
3749 stats->etherStatsCollisions +=
3750 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
3751 stats->outXonSent +=
3752 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
3753 stats->outXoffSent +=
3754 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
3755 stats->dot3StatsInternalMacTransmitErrors +=
3756 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
3757 stats->dot3StatsSingleCollisionFrames +=
3758 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
3759 stats->dot3StatsMultipleCollisionFrames +=
3760 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
3761 stats->dot3StatsDeferredTransmissions +=
3762 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
3763 stats->dot3StatsExcessiveCollisions +=
3764 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
3765 stats->dot3StatsLateCollisions +=
3766 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
3767 stats->ifHCOutUcastPkts +=
3768 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
3769 stats->ifHCOutMulticastPkts +=
3770 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
3771 stats->ifHCOutBroadcastPkts +=
3772 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
3773
3774 stats->ifHCInOctets +=
3775 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
3776 stats->etherStatsFragments +=
3777 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
3778 stats->ifHCInUcastPkts +=
3779 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
3780 stats->ifHCInMulticastPkts +=
3781 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
3782 stats->ifHCInBroadcastPkts +=
3783 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
3784 stats->dot3StatsFCSErrors +=
3785 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
3786 stats->dot3StatsAlignmentErrors +=
3787 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
3788 stats->xonPauseFramesReceived +=
3789 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
3790 stats->xoffPauseFramesReceived +=
3791 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
3792 stats->macControlFramesReceived +=
3793 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
3794 stats->xoffStateEntered +=
3795 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
3796 stats->dot3StatsFramesTooLong +=
3797 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
3798 stats->etherStatsJabbers +=
3799 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
3800 stats->etherStatsUndersizePkts +=
3801 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
3802
3803 stats->FramesDroppedDueToFilters +=
3804 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
3805 stats->DmaWriteQueueFull +=
3806 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
3807 stats->DmaWriteHighPriQueueFull +=
3808 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
3809 stats->NoMoreRxBDs +=
3810 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3811 stats->InputDiscards +=
3812 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3813 stats->InputErrors +=
3814 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3815 stats->RecvThresholdHit +=
3816 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
3817
3818 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
3819 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
3820 stats->InputErrors);
3821}
3822
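/*
 * Clear the MAC statistics by reading (and discarding) every counter;
 * the statistics registers are presumed to be clear-on-read in this
 * configuration, so a full sweep resets them to zero.
 */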
3823static void
3824bge_stats_clear_regs(struct bge_softc *sc)
3825{
3826
3827 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
3828 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
3829 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
3830 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
3831 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
3832 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
3833 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
3834 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
3835 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
3836 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
3837 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
3838 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
3839 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
3840
3841 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
3842 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
3843 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
3844 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
3845 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
3846 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
3847 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
3848 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
3849 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
3850 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
3851 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
3852 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
3853 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
3854 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
3855
3856 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
3857 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
3858 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
3859 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3860 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3861 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3862 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
3863}
3864
3865static void
3866bge_stats_update(struct bge_softc *sc)
3867{
3868 struct ifnet *ifp;
3869 bus_size_t stats;
3870 uint32_t cnt; /* current register value */
3871
3872 ifp = sc->bge_ifp;
3873
3874 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3875
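/* Read a 32-bit word of a statistics counter from the NIC's memory window. */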
3876#define READ_STAT(sc, stats, stat) \
3877 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3878
3879 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3880 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
3881 sc->bge_tx_collisions = cnt;
3882
3883 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3884 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
3885 sc->bge_rx_discards = cnt;
3886
3887 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3888 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
3889 sc->bge_tx_discards = cnt;
3890
3891#undef READ_STAT
3892}
3893
3894/*
3895 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3896 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3897 * but when such padded frames employ the bge IP/TCP checksum offload,
3898 * the hardware checksum assist gives incorrect results (possibly
3899 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3900 * If we pad such runts with zeros, the onboard checksum comes out correct.
3901 */
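/*
 * For example (assuming ETHER_MIN_NOPAD is 60): a 54-byte frame such as
 * a bare TCP ACK gets padlen = 60 - 54 = 6 zero bytes appended, either
 * in the trailing space of the last mbuf or in a freshly allocated one.
 */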
3902static __inline int
3903bge_cksum_pad(struct mbuf *m)
3904{
3905 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3906 struct mbuf *last;
3907
3908 /* If there's only the packet-header and we can pad there, use it. */
3909 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
3910 M_TRAILINGSPACE(m) >= padlen) {
3911 last = m;
3912 } else {
3913 /*
3914 * Walk packet chain to find last mbuf. We will either
3915 * pad there, or append a new mbuf and pad it.
3916 */
3917 for (last = m; last->m_next != NULL; last = last->m_next);
3918 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
3919 /* Allocate new empty mbuf, pad it. Compact later. */
3920 struct mbuf *n;
3921
3922 MGET(n, M_DONTWAIT, MT_DATA);
3923 if (n == NULL)
3924 return (ENOBUFS);
3925 n->m_len = 0;
3926 last->m_next = n;
3927 last = n;
3928 }
3929 }
3930
3931 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
3932 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3933 last->m_len += padlen;
3934 m->m_pkthdr.len += padlen;
3935
3936 return (0);
3937}
3938
3939static struct mbuf *
3940bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss)
3941{
3942 struct ip *ip;
3943 struct tcphdr *tcp;
3944 struct mbuf *n;
3945 uint16_t hlen;
3946 uint32_t poff;
3947
3948 if (M_WRITABLE(m) == 0) {
3949 /* Get a writable copy. */
3950 n = m_dup(m, M_DONTWAIT);
3951 m_freem(m);
3952 if (n == NULL)
3953 return (NULL);
3954 m = n;
3955 }
3956 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
3957 if (m == NULL)
3958 return (NULL);
3959 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
3960 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
3961 m = m_pullup(m, poff + sizeof(struct tcphdr));
3962 if (m == NULL)
3963 return (NULL);
3964 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
3965 m = m_pullup(m, poff + (tcp->th_off << 2));
3966 if (m == NULL)
3967 return (NULL);
3968 /*
3969	 * It seems the controller doesn't modify the IP length and TCP
3970	 * pseudo checksum, so the checksums from the upper stack must be 0.
3971 */
3972 *mss = m->m_pkthdr.tso_segsz;
3973 ip->ip_sum = 0;
3974 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
3975 /* Clear pseudo checksum computed by TCP stack. */
3976 tcp->th_sum = 0;
3977 /*
3978	 * Broadcom controllers use a different descriptor format for
3979	 * TSO depending on ASIC revision. Due to the TSO-capable firmware
3980	 * license issue and the lower performance of firmware-based TSO,
3981	 * we only support hardware-based TSO, which is applicable to
3982	 * BCM5755 or newer controllers. Hardware-based TSO uses the lower
3983	 * 11 bits to store the MSS and the upper 5 bits to store the
3984	 * IP/TCP header length (including IP/TCP options). The header
3985	 * length is expressed in 32-bit units.
3986 */
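	/*
	 * For example, with a 20-byte IP header and a 20-byte TCP header
	 * (no options), hlen = 40 >> 2 = 10 32-bit words, so for an MSS
	 * of 1460 the descriptor field becomes 1460 | (10 << 11).
	 */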
3987 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
3988 *mss |= (hlen << 11);
3989 return (m);
3990}
3991
3992/*
3993 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3994 * pointers to descriptors.
3995 */
3996static int
3997bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
3998{
3999 bus_dma_segment_t segs[BGE_NSEG_NEW];
4000 bus_dmamap_t map;
4001 struct bge_tx_bd *d;
4002 struct mbuf *m = *m_head;
4003 uint32_t idx = *txidx;
4004 uint16_t csum_flags, mss, vlan_tag;
4005 int nsegs, i, error;
4006
4007 csum_flags = 0;
4008 mss = 0;
4009 vlan_tag = 0;
4010 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4011 *m_head = m = bge_setup_tso(sc, m, &mss);
4012 if (*m_head == NULL)
4013 return (ENOBUFS);
4014 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4015 BGE_TXBDFLAG_CPU_POST_DMA;
4016 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4017 if (m->m_pkthdr.csum_flags & CSUM_IP)
4018 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4019 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4020 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4021 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4022 (error = bge_cksum_pad(m)) != 0) {
4023 m_freem(m);
4024 *m_head = NULL;
4025 return (error);
4026 }
4027 }
4028 if (m->m_flags & M_LASTFRAG)
4029 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4030 else if (m->m_flags & M_FRAG)
4031 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4032 }
4033
4034 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
4035 sc->bge_forced_collapse > 0 &&
4036 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4037 /*
4038		 * Forcibly collapse mbuf chains to overcome a hardware
4039		 * limitation that only supports a single outstanding
4040		 * DMA read operation.
4041 */
4042 if (sc->bge_forced_collapse == 1)
4043 m = m_defrag(m, M_DONTWAIT);
4044 else
4045 m = m_collapse(m, M_DONTWAIT, sc->bge_forced_collapse);
4046 if (m == NULL)
4047 m = *m_head;
4048 *m_head = m;
4049 }
4050
4051 map = sc->bge_cdata.bge_tx_dmamap[idx];
4052 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4053 &nsegs, BUS_DMA_NOWAIT);
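	/*
	 * If the chain has more segments than the DMA tag allows, the load
	 * fails with EFBIG; collapse the chain and retry the load once.
	 */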
4054 if (error == EFBIG) {
4055 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4056 if (m == NULL) {
4057 m_freem(*m_head);
4058 *m_head = NULL;
4059 return (ENOBUFS);
4060 }
4061 *m_head = m;
4062 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4063 m, segs, &nsegs, BUS_DMA_NOWAIT);
4064 if (error) {
4065 m_freem(m);
4066 *m_head = NULL;
4067 return (error);
4068 }
4069 } else if (error != 0)
4070 return (error);
4071
4072 /* Check if we have enough free send BDs. */
4073 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4074 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4075 return (ENOBUFS);
4076 }
4077
4078 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4079
4080#if __FreeBSD_version > 700022
4081 if (m->m_flags & M_VLANTAG) {
4082 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4083 vlan_tag = m->m_pkthdr.ether_vtag;
4084 }
4085#else
4086 {
4087 struct m_tag *mtag;
4088
4089 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
4090 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4091 vlan_tag = VLAN_TAG_VALUE(mtag);
4092 }
4093 }
4094#endif
4095 for (i = 0; ; i++) {
4096 d = &sc->bge_ldata.bge_tx_ring[idx];
4097 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4098 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4099 d->bge_len = segs[i].ds_len;
4100 d->bge_flags = csum_flags;
4101 d->bge_vlan_tag = vlan_tag;
4102 d->bge_mss = mss;
4103 if (i == nsegs - 1)
4104 break;
4105 BGE_INC(idx, BGE_TX_RING_CNT);
4106 }
4107
4108 /* Mark the last segment as end of packet... */
4109 d->bge_flags |= BGE_TXBDFLAG_END;
4110
4111 /*
4112	 * Ensure that the map for this transmission
4113 * is placed at the array index of the last descriptor
4114 * in this chain.
4115 */
4116 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4117 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4118 sc->bge_cdata.bge_tx_chain[idx] = m;
4119 sc->bge_txcnt += nsegs;
4120
4121 BGE_INC(idx, BGE_TX_RING_CNT);
4122 *txidx = idx;
4123
4124 return (0);
4125}
4126
4127/*
4128 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4129 * to the mbuf data regions directly in the transmit descriptors.
4130 */
4131static void
4132bge_start_locked(struct ifnet *ifp)
4133{
4134 struct bge_softc *sc;
4135 struct mbuf *m_head;
4136 uint32_t prodidx;
4137 int count;
4138
4139 sc = ifp->if_softc;
4140 BGE_LOCK_ASSERT(sc);
4141
4142 if (!sc->bge_link ||
4143 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4144 IFF_DRV_RUNNING)
4145 return;
4146
4147 prodidx = sc->bge_tx_prodidx;
4148
4149 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4150 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4151 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4152 break;
4153 }
4154 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4155 if (m_head == NULL)
4156 break;
4157
4158 /*
4159 * XXX
4160 * The code inside the if() block is never reached since we
4161 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4162 * requests to checksum TCP/UDP in a fragmented packet.
4163 *
4164 * XXX
4165 * safety overkill. If this is a fragmented packet chain
4166 * with delayed TCP/UDP checksums, then only encapsulate
4167 * it if we have enough descriptors to handle the entire
4168 * chain at once.
4169 * (paranoia -- may not actually be needed)
4170 */
4171 if (m_head->m_flags & M_FIRSTFRAG &&
4172 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4173 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4174 m_head->m_pkthdr.csum_data + 16) {
4175 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4176 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4177 break;
4178 }
4179 }
4180
4181 /*
4182 * Pack the data into the transmit ring. If we
4183 * don't have room, set the OACTIVE flag and wait
4184 * for the NIC to drain the ring.
4185 */
4186 if (bge_encap(sc, &m_head, &prodidx)) {
4187 if (m_head == NULL)
4188 break;
4189 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4190 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4191 break;
4192 }
4193 ++count;
4194
4195 /*
4196 * If there's a BPF listener, bounce a copy of this frame
4197 * to him.
4198 */
4199#ifdef ETHER_BPF_MTAP
4200 ETHER_BPF_MTAP(ifp, m_head);
4201#else
4202 BPF_MTAP(ifp, m_head);
4203#endif
4204 }
4205
4206 if (count > 0) {
4207 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4208 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4209 /* Transmit. */
4210 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4211 /* 5700 b2 errata */
4212 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4213 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4214
4215 sc->bge_tx_prodidx = prodidx;
4216
4217 /*
4218 * Set a timeout in case the chip goes out to lunch.
4219 */
4220 sc->bge_timer = 5;
4221 }
4222}
4223
4224/*
4225 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4226 * to the mbuf data regions directly in the transmit descriptors.
4227 */
4228static void
4229bge_start(struct ifnet *ifp)
4230{
4231 struct bge_softc *sc;
4232
4233 sc = ifp->if_softc;
4234 BGE_LOCK(sc);
4235 bge_start_locked(ifp);
4236 BGE_UNLOCK(sc);
4237}
4238
4239static void
4240bge_init_locked(struct bge_softc *sc)
4241{
4242 struct ifnet *ifp;
4243 uint16_t *m;
4244
4245 BGE_LOCK_ASSERT(sc);
4246
4247 ifp = sc->bge_ifp;
4248
4249 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4250 return;
4251
4252 /* Cancel pending I/O and flush buffers. */
4253 bge_stop(sc);
4254
4255 bge_stop_fw(sc);
4256 bge_sig_pre_reset(sc, BGE_RESET_START);
4257 bge_reset(sc);
4258 bge_sig_legacy(sc, BGE_RESET_START);
4259 bge_sig_post_reset(sc, BGE_RESET_START);
4260
4261 bge_chipinit(sc);
4262
4263 /*
4264 * Init the various state machines, ring
4265 * control blocks and firmware.
4266 */
4267 if (bge_blockinit(sc)) {
4268 device_printf(sc->bge_dev, "initialization failure\n");
4269 return;
4270 }
4271
4272 ifp = sc->bge_ifp;
4273
4274 /* Specify MTU. */
4275 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4276 ETHER_HDR_LEN + ETHER_CRC_LEN +
4277 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4278
4279 /* Load our MAC address. */
4280 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4281 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4282 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4283
4284 /* Program promiscuous mode. */
4285 bge_setpromisc(sc);
4286
4287 /* Program multicast filter. */
4288 bge_setmulti(sc);
4289
4290 /* Program VLAN tag stripping. */
4291 bge_setvlan(sc);
4292
4293 /* Override UDP checksum offloading. */
4294 if (sc->bge_forced_udpcsum == 0)
4295 sc->bge_csum_features &= ~CSUM_UDP;
4296 else
4297 sc->bge_csum_features |= CSUM_UDP;
4298 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4299 ifp->if_capenable & IFCAP_TXCSUM) {
4300 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4301 ifp->if_hwassist |= sc->bge_csum_features;
4302 }
4303
4304 /* Init RX ring. */
4305 if (bge_init_rx_ring_std(sc) != 0) {
4306 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4307 bge_stop(sc);
4308 return;
4309 }
4310
4311 /*
4312 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4313	 * memory to ensure that the chip has in fact read the first
4314 * entry of the ring.
4315 */
4316 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4317 uint32_t v, i;
4318 for (i = 0; i < 10; i++) {
4319 DELAY(20);
4320 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4321 if (v == (MCLBYTES - ETHER_ALIGN))
4322 break;
4323 }
4324 if (i == 10)
4325 device_printf (sc->bge_dev,
4326 "5705 A0 chip failed to load RX ring\n");
4327 }
4328
4329 /* Init jumbo RX ring. */
4330 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4331 (MCLBYTES - ETHER_ALIGN)) {
4332 if (bge_init_rx_ring_jumbo(sc) != 0) {
4333 device_printf(sc->bge_dev,
4334 "no memory for jumbo Rx buffers.\n");
4335 bge_stop(sc);
4336 return;
4337 }
4338 }
4339
4340 /* Init our RX return ring index. */
4341 sc->bge_rx_saved_considx = 0;
4342
4343 /* Init our RX/TX stat counters. */
4344 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4345
4346 /* Init TX ring. */
4347 bge_init_tx_ring(sc);
4348
4349 /* Turn on transmitter. */
4350 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
4351
4352 /* Turn on receiver. */
4353 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4354
4355 /*
4356	 * Set the number of good frames to receive after the RX MBUF
4357 * Low Watermark has been reached. After the RX MAC receives
4358 * this number of frames, it will drop subsequent incoming
4359 * frames until the MBUF High Watermark is reached.
4360 */
4361 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4362
4363 /* Clear MAC statistics. */
4364 if (BGE_IS_5705_PLUS(sc))
4365 bge_stats_clear_regs(sc);
4366
4367 /* Tell firmware we're alive. */
4368 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4369
4370#ifdef DEVICE_POLLING
4371 /* Disable interrupts if we are polling. */
4372 if (ifp->if_capenable & IFCAP_POLLING) {
4373 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4374 BGE_PCIMISCCTL_MASK_PCI_INTR);
4375 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4376 } else
4377#endif
4378
4379 /* Enable host interrupts. */
4380 {
4381 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4382 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4383 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4384 }
4385
4386 bge_ifmedia_upd_locked(ifp);
4387
4388 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4389 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4390
4391 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4392}
4393
4394static void
4395bge_init(void *xsc)
4396{
4397 struct bge_softc *sc = xsc;
4398
4399 BGE_LOCK(sc);
4400 bge_init_locked(sc);
4401 BGE_UNLOCK(sc);
4402}
4403
4404/*
4405 * Set media options.
4406 */
4407static int
4408bge_ifmedia_upd(struct ifnet *ifp)
4409{
4410 struct bge_softc *sc = ifp->if_softc;
4411 int res;
4412
4413 BGE_LOCK(sc);
4414 res = bge_ifmedia_upd_locked(ifp);
4415 BGE_UNLOCK(sc);
4416
4417 return (res);
4418}
4419
4420static int
4421bge_ifmedia_upd_locked(struct ifnet *ifp)
4422{
4423 struct bge_softc *sc = ifp->if_softc;
4424 struct mii_data *mii;
4425 struct mii_softc *miisc;
4426 struct ifmedia *ifm;
4427
4428 BGE_LOCK_ASSERT(sc);
4429
4430 ifm = &sc->bge_ifmedia;
4431
4432 /* If this is a 1000baseX NIC, enable the TBI port. */
4433 if (sc->bge_flags & BGE_FLAG_TBI) {
4434 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4435 return (EINVAL);
4436 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4437 case IFM_AUTO:
4438 /*
4439 * The BCM5704 ASIC appears to have a special
4440 * mechanism for programming the autoneg
4441 * advertisement registers in TBI mode.
4442 */
4443 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4444 uint32_t sgdig;
4445 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4446 if (sgdig & BGE_SGDIGSTS_DONE) {
4447 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4448 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4449 sgdig |= BGE_SGDIGCFG_AUTO |
4450 BGE_SGDIGCFG_PAUSE_CAP |
4451 BGE_SGDIGCFG_ASYM_PAUSE;
4452 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4453 sgdig | BGE_SGDIGCFG_SEND);
4454 DELAY(5);
4455 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4456 }
4457 }
4458 break;
4459 case IFM_1000_SX:
4460 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4461 BGE_CLRBIT(sc, BGE_MAC_MODE,
4462 BGE_MACMODE_HALF_DUPLEX);
4463 } else {
4464 BGE_SETBIT(sc, BGE_MAC_MODE,
4465 BGE_MACMODE_HALF_DUPLEX);
4466 }
4467 break;
4468 default:
4469 return (EINVAL);
4470 }
4471 return (0);
4472 }
4473
4474 sc->bge_link_evt++;
4475 mii = device_get_softc(sc->bge_miibus);
4476 if (mii->mii_instance)
4477 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4478 mii_phy_reset(miisc);
4479 mii_mediachg(mii);
4480
4481 /*
4482 * Force an interrupt so that we will call bge_link_upd
4483 * if needed and clear any pending link state attention.
4484	 * Without this we would not get any further interrupts for
4485	 * link state changes and thus would never bring the link up
4486	 * or be able to send in bge_start_locked. The only way to get
4487	 * things working then was to receive a packet and get an RX
4488	 * interrupt.
4489	 * bge_tick should help for fiber cards, and we might not need
4490	 * to do this here if BGE_FLAG_TBI is set, but since we poll
4491	 * for fiber anyway it should not hurt.
4492 */
4493 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4494 sc->bge_flags & BGE_FLAG_5788)
4495 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4496 else
4497 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4498
4499 return (0);
4500}
4501
4502/*
4503 * Report current media status.
4504 */
4505static void
4506bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4507{
4508 struct bge_softc *sc = ifp->if_softc;
4509 struct mii_data *mii;
4510
4511 BGE_LOCK(sc);
4512
4513 if (sc->bge_flags & BGE_FLAG_TBI) {
4514 ifmr->ifm_status = IFM_AVALID;
4515 ifmr->ifm_active = IFM_ETHER;
4516 if (CSR_READ_4(sc, BGE_MAC_STS) &
4517 BGE_MACSTAT_TBI_PCS_SYNCHED)
4518 ifmr->ifm_status |= IFM_ACTIVE;
4519 else {
4520 ifmr->ifm_active |= IFM_NONE;
4521 BGE_UNLOCK(sc);
4522 return;
4523 }
4524 ifmr->ifm_active |= IFM_1000_SX;
4525 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4526 ifmr->ifm_active |= IFM_HDX;
4527 else
4528 ifmr->ifm_active |= IFM_FDX;
4529 BGE_UNLOCK(sc);
4530 return;
4531 }
4532
4533 mii = device_get_softc(sc->bge_miibus);
4534 mii_pollstat(mii);
4535 ifmr->ifm_active = mii->mii_media_active;
4536 ifmr->ifm_status = mii->mii_media_status;
4537
4538 BGE_UNLOCK(sc);
4539}
4540
4541static int
4542bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4543{
4544 struct bge_softc *sc = ifp->if_softc;
4545 struct ifreq *ifr = (struct ifreq *) data;
4546 struct mii_data *mii;
4547 int flags, mask, error = 0;
4548
4549 switch (command) {
4550 case SIOCSIFMTU:
4551 if (ifr->ifr_mtu < ETHERMIN ||
4552 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4553 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4554 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4555 ifr->ifr_mtu > ETHERMTU))
4556 error = EINVAL;
4557 else if (ifp->if_mtu != ifr->ifr_mtu) {
4558 ifp->if_mtu = ifr->ifr_mtu;
4559 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4560 bge_init(sc);
4561 }
4562 break;
4563 case SIOCSIFFLAGS:
4564 BGE_LOCK(sc);
4565 if (ifp->if_flags & IFF_UP) {
4566 /*
4567 * If only the state of the PROMISC flag changed,
4568 * then just use the 'set promisc mode' command
4569 * instead of reinitializing the entire NIC. Doing
4570 * a full re-init means reloading the firmware and
4571 * waiting for it to start up, which may take a
4572 * second or two. Similarly for ALLMULTI.
4573 */
4574 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4575 flags = ifp->if_flags ^ sc->bge_if_flags;
4576 if (flags & IFF_PROMISC)
4577 bge_setpromisc(sc);
4578 if (flags & IFF_ALLMULTI)
4579 bge_setmulti(sc);
4580 } else
4581 bge_init_locked(sc);
4582 } else {
4583 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4584 bge_stop(sc);
4585 }
4586 }
4587 sc->bge_if_flags = ifp->if_flags;
4588 BGE_UNLOCK(sc);
4589 error = 0;
4590 break;
4591 case SIOCADDMULTI:
4592 case SIOCDELMULTI:
4593 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4594 BGE_LOCK(sc);
4595 bge_setmulti(sc);
4596 BGE_UNLOCK(sc);
4597 error = 0;
4598 }
4599 break;
4600 case SIOCSIFMEDIA:
4601 case SIOCGIFMEDIA:
4602 if (sc->bge_flags & BGE_FLAG_TBI) {
4603 error = ifmedia_ioctl(ifp, ifr,
4604 &sc->bge_ifmedia, command);
4605 } else {
4606 mii = device_get_softc(sc->bge_miibus);
4607 error = ifmedia_ioctl(ifp, ifr,
4608 &mii->mii_media, command);
4609 }
4610 break;
4611 case SIOCSIFCAP:
4612 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4613#ifdef DEVICE_POLLING
4614 if (mask & IFCAP_POLLING) {
4615 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4616 error = ether_poll_register(bge_poll, ifp);
4617 if (error)
4618 return (error);
4619 BGE_LOCK(sc);
4620 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4621 BGE_PCIMISCCTL_MASK_PCI_INTR);
4622 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4623 ifp->if_capenable |= IFCAP_POLLING;
4624 BGE_UNLOCK(sc);
4625 } else {
4626 error = ether_poll_deregister(ifp);
4627				/* Enable interrupts even in the error case. */
4628 BGE_LOCK(sc);
4629 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
4630 BGE_PCIMISCCTL_MASK_PCI_INTR);
4631 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4632 ifp->if_capenable &= ~IFCAP_POLLING;
4633 BGE_UNLOCK(sc);
4634 }
4635 }
4636#endif
4637 if ((mask & IFCAP_TXCSUM) != 0 &&
4638 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
4639 ifp->if_capenable ^= IFCAP_TXCSUM;
4640 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
4641 ifp->if_hwassist |= sc->bge_csum_features;
4642 else
4643 ifp->if_hwassist &= ~sc->bge_csum_features;
4644 }
4645
4646 if ((mask & IFCAP_RXCSUM) != 0 &&
4647 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
4648 ifp->if_capenable ^= IFCAP_RXCSUM;
4649
4650 if ((mask & IFCAP_TSO4) != 0 &&
4651 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
4652 ifp->if_capenable ^= IFCAP_TSO4;
4653 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
4654 ifp->if_hwassist |= CSUM_TSO;
4655 else
4656 ifp->if_hwassist &= ~CSUM_TSO;
4657 }
4658
4659 if (mask & IFCAP_VLAN_MTU) {
4660 ifp->if_capenable ^= IFCAP_VLAN_MTU;
4661 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4662 bge_init(sc);
4663 }
4664
4665 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
4666 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
4667 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4668 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
4669 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
4670 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4671 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
4672 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
4673 BGE_LOCK(sc);
4674 bge_setvlan(sc);
4675 BGE_UNLOCK(sc);
4676 }
4677#ifdef VLAN_CAPABILITIES
4678 VLAN_CAPABILITIES(ifp);
4679#endif
4680 break;
4681 default:
4682 error = ether_ioctl(ifp, command, data);
4683 break;
4684 }
4685
4686 return (error);
4687}
4688
4689static void
4690bge_watchdog(struct bge_softc *sc)
4691{
4692 struct ifnet *ifp;
4693
4694 BGE_LOCK_ASSERT(sc);
4695
4696 if (sc->bge_timer == 0 || --sc->bge_timer)
4697 return;
4698
4699 ifp = sc->bge_ifp;
4700
4701 if_printf(ifp, "watchdog timeout -- resetting\n");
4702
4703 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4704 bge_init_locked(sc);
4705
4706 ifp->if_oerrors++;
4707}
4708
4709/*
4710 * Stop the adapter and free any mbufs allocated to the
4711 * RX and TX lists.
4712 */
4713static void
4714bge_stop(struct bge_softc *sc)
4715{
4716 struct ifnet *ifp;
4717
4718 BGE_LOCK_ASSERT(sc);
4719
4720 ifp = sc->bge_ifp;
4721
4722 callout_stop(&sc->bge_stat_ch);
4723
4724 /* Disable host interrupts. */
4725 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4726 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4727
4728 /*
4729 * Tell firmware we're shutting down.
4730 */
4731 bge_stop_fw(sc);
4732 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4733
4734 /*
4735 * Disable all of the receiver blocks.
4736 */
4737 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4738 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4739 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4740 if (!(BGE_IS_5705_PLUS(sc)))
4741 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4742 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4743 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4744 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4745
4746 /*
4747 * Disable all of the transmit blocks.
4748 */
4749 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4750 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4751 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4752 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4753 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4754 if (!(BGE_IS_5705_PLUS(sc)))
4755 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4756 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4757
4758 /*
4759 * Shut down all of the memory managers and related
4760 * state machines.
4761 */
4762 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4763 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4764 if (!(BGE_IS_5705_PLUS(sc)))
4765 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4766 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4767 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4768 if (!(BGE_IS_5705_PLUS(sc))) {
4769 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4770 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4771 }
4772 /* Update MAC statistics. */
4773 if (BGE_IS_5705_PLUS(sc))
4774 bge_stats_update_regs(sc);
4775
4776 bge_reset(sc);
4777 bge_sig_legacy(sc, BGE_RESET_STOP);
4778 bge_sig_post_reset(sc, BGE_RESET_STOP);
4779
4780 /*
4781 * Keep the ASF firmware running if up.
4782 */
4783 if (sc->bge_asf_mode & ASF_STACKUP)
4784 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4785 else
4786 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4787
4788 /* Free the RX lists. */
4789 bge_free_rx_ring_std(sc);
4790
4791 /* Free jumbo RX list. */
4792 if (BGE_IS_JUMBO_CAPABLE(sc))
4793 bge_free_rx_ring_jumbo(sc);
4794
4795 /* Free TX buffers. */
4796 bge_free_tx_ring(sc);
4797
4798 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4799
4800 /* Clear MAC's link state (PHY may still have link UP). */
4801 if (bootverbose && sc->bge_link)
4802 if_printf(sc->bge_ifp, "link DOWN\n");
4803 sc->bge_link = 0;
4804
4805 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4806}
4807
4808/*
4809 * Stop all chip I/O so that the kernel's probe routines don't
4810 * get confused by errant DMAs when rebooting.
4811 */
4812static int
4813bge_shutdown(device_t dev)
4814{
4815 struct bge_softc *sc;
4816
4817 sc = device_get_softc(dev);
4818 BGE_LOCK(sc);
4819 bge_stop(sc);
4820 bge_reset(sc);
4821 BGE_UNLOCK(sc);
4822
4823 return (0);
4824}
4825
4826static int
4827bge_suspend(device_t dev)
4828{
4829 struct bge_softc *sc;
4830
4831 sc = device_get_softc(dev);
4832 BGE_LOCK(sc);
4833 bge_stop(sc);
4834 BGE_UNLOCK(sc);
4835
4836 return (0);
4837}
4838
4839static int
4840bge_resume(device_t dev)
4841{
4842 struct bge_softc *sc;
4843 struct ifnet *ifp;
4844
4845 sc = device_get_softc(dev);
4846 BGE_LOCK(sc);
4847 ifp = sc->bge_ifp;
4848 if (ifp->if_flags & IFF_UP) {
4849 bge_init_locked(sc);
4850 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4851 bge_start_locked(ifp);
4852 }
4853 BGE_UNLOCK(sc);
4854
4855 return (0);
4856}
4857
4858static void
4859bge_link_upd(struct bge_softc *sc)
4860{
4861 struct mii_data *mii;
4862 uint32_t link, status;
4863
4864 BGE_LOCK_ASSERT(sc);
4865
4866 /* Clear 'pending link event' flag. */
4867 sc->bge_link_evt = 0;
4868
4869 /*
4870 * Process link state changes.
4871 * Grrr. The link status word in the status block does
4872 * not work correctly on the BCM5700 rev AX and BX chips,
4873 * according to all available information. Hence, we have
4874 * to enable MII interrupts in order to properly obtain
4875 * async link changes. Unfortunately, this also means that
4876 * we have to read the MAC status register to detect link
4877 * changes, thereby adding an additional register access to
4878 * the interrupt handler.
4879 *
4880	 * XXX: perhaps the link state detection procedure used for
4881	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
4882 */
4883
4884 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4885 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
4886 status = CSR_READ_4(sc, BGE_MAC_STS);
4887 if (status & BGE_MACSTAT_MI_INTERRUPT) {
4888 mii = device_get_softc(sc->bge_miibus);
4889 mii_pollstat(mii);
4890 if (!sc->bge_link &&
4891 mii->mii_media_status & IFM_ACTIVE &&
4892 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4893 sc->bge_link++;
4894 if (bootverbose)
4895 if_printf(sc->bge_ifp, "link UP\n");
4896 } else if (sc->bge_link &&
4897 (!(mii->mii_media_status & IFM_ACTIVE) ||
4898 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4899 sc->bge_link = 0;
4900 if (bootverbose)
4901 if_printf(sc->bge_ifp, "link DOWN\n");
4902 }
4903
4904 /* Clear the interrupt. */
4905 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4906 BGE_EVTENB_MI_INTERRUPT);
4907 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4908 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
4909 BRGPHY_INTRS);
4910 }
4911 return;
4912 }
4913
4914 if (sc->bge_flags & BGE_FLAG_TBI) {
4915 status = CSR_READ_4(sc, BGE_MAC_STS);
4916 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4917 if (!sc->bge_link) {
4918 sc->bge_link++;
4919 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
4920 BGE_CLRBIT(sc, BGE_MAC_MODE,
4921 BGE_MACMODE_TBI_SEND_CFGS);
4922 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4923 if (bootverbose)
4924 if_printf(sc->bge_ifp, "link UP\n");
4925 if_link_state_change(sc->bge_ifp,
4926 LINK_STATE_UP);
4927 }
4928 } else if (sc->bge_link) {
4929 sc->bge_link = 0;
4930 if (bootverbose)
4931 if_printf(sc->bge_ifp, "link DOWN\n");
4932 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
4933 }
4934 } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
4935 /*
4936		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
4937		 * bit in the status word always set. Work around this bug by
4938		 * reading the PHY link status directly.
4939 */
4940 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
4941
4942 if (link != sc->bge_link ||
4943 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
4944 mii = device_get_softc(sc->bge_miibus);
4945 mii_pollstat(mii);
4946 if (!sc->bge_link &&
4947 mii->mii_media_status & IFM_ACTIVE &&
4948 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4949 sc->bge_link++;
4950 if (bootverbose)
4951 if_printf(sc->bge_ifp, "link UP\n");
4952 } else if (sc->bge_link &&
4953 (!(mii->mii_media_status & IFM_ACTIVE) ||
4954 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4955 sc->bge_link = 0;
4956 if (bootverbose)
4957 if_printf(sc->bge_ifp, "link DOWN\n");
4958 }
4959 }
4960 } else {
4961 /*
4962 * Discard link events for MII/GMII controllers
4963 * if MI auto-polling is disabled.
4964 */
4965 }
4966
4967 /* Clear the attention. */
4968 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4969 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4970 BGE_MACSTAT_LINK_CHANGED);
4971}
4972
4973static void
4974bge_add_sysctls(struct bge_softc *sc)
4975{
4976 struct sysctl_ctx_list *ctx;
4977 struct sysctl_oid_list *children;
4978 char tn[32];
4979 int unit;
4980
4981 ctx = device_get_sysctl_ctx(sc->bge_dev);
4982 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
4983
4984#ifdef BGE_REGISTER_DEBUG
4985 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
4986 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
4987 "Debug Information");
4988
4989 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
4990 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
4991 "Register Read");
4992
4993 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
4994 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
4995 "Memory Read");
4996
4997#endif
4998
4999 unit = device_get_unit(sc->bge_dev);
5000 /*
5001 * A common design characteristic for many Broadcom client controllers
5002 * is that they only support a single outstanding DMA read operation
5003 * on the PCIe bus. This means that it will take twice as long to fetch
5004 * a TX frame that is split into header and payload buffers as it does
5005 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5006 * these controllers, coalescing buffers to reduce the number of memory
5007	 * reads is an effective way to get maximum performance (about 940Mbps).
5008	 * Without collapsing TX buffers the maximum TCP bulk transfer
5009	 * performance is about 850Mbps. However, forcing the coalescing of
5010	 * mbufs consumes a lot of CPU cycles, so leave it off by default.
5011 */
5012 sc->bge_forced_collapse = 0;
5013 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5014 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5015 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5016 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5017 "Number of fragmented TX buffers of a frame allowed before "
5018 "forced collapsing");
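	/*
	 * For example (unit 0): setting dev.bge.0.forced_collapse to 1
	 * makes the driver m_defrag() multi-mbuf PCIe transmits, while
	 * larger values are passed to m_collapse() as the fragment limit.
	 */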
5019
5020 /*
5021 * It seems all Broadcom controllers have a bug that can generate UDP
5022 * datagrams with checksum value 0 when TX UDP checksum offloading is
5023	 * enabled. Generating a UDP checksum value of 0 is an RFC 768
5024	 * violation. Even though the probability of generating such UDP
5025	 * datagrams is low, I don't want to see FreeBSD boxes inject such
5026	 * datagrams into the network, so disable UDP checksum offloading by
5027	 * default. Users can still override this behavior by setting a
5028	 * sysctl variable, dev.bge.0.forced_udpcsum.
5029 */
5030 sc->bge_forced_udpcsum = 0;
5031 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5032 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5033 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5034 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5035 "Enable UDP checksum offloading even if controller can "
5036 "generate UDP checksum value 0");
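	/*
	 * The default can also be preset at boot via loader.conf using the
	 * tunable named above (for unit 0, dev.bge.0.bge_forced_udpcsum),
	 * since TUNABLE_INT_FETCH() reads it from the kernel environment.
	 */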
5037
5038 if (BGE_IS_5705_PLUS(sc))
5039 bge_add_sysctl_stats_regs(sc, ctx, children);
5040 else
5041 bge_add_sysctl_stats(sc, ctx, children);
5042}
5043
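/*
 * Convenience wrapper: register a read-only sysctl whose handler
 * (bge_sysctl_stats) reports the statistics-block field at the given
 * struct bge_stats offset.
 */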
5044#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5045 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5046 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5047 desc)
5048
5049static void
5050bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5051 struct sysctl_oid_list *parent)
5052{
5053 struct sysctl_oid *tree;
5054 struct sysctl_oid_list *children, *schildren;
5055
5056 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5057 NULL, "BGE Statistics");
5058 schildren = children = SYSCTL_CHILDREN(tree);
5059 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5060 children, COSFramesDroppedDueToFilters,
5061 "FramesDroppedDueToFilters");
5062 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5063 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5064 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5065 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5066 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5067 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5068 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5069 children, ifInDiscards, "InputDiscards");
5070 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5071 children, ifInErrors, "InputErrors");
5072 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5073 children, nicRecvThresholdHit, "RecvThresholdHit");
5074 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5075 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5076 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5077 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5078 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5079 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5080 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5081 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5082 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5083 children, nicRingStatusUpdate, "RingStatusUpdate");
5084 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5085 children, nicInterrupts, "Interrupts");
5086 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5087 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5088 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5089 children, nicSendThresholdHit, "SendThresholdHit");
5090
5091 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5092 NULL, "BGE RX Statistics");
5093 children = SYSCTL_CHILDREN(tree);
5094 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5095 children, rxstats.ifHCInOctets, "ifHCInOctets");
5096 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5097 children, rxstats.etherStatsFragments, "Fragments");
5098 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5099 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5100 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5101 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5102 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5103 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5104 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5105 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5106 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5107 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5108 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5109 children, rxstats.xoffPauseFramesReceived,
5110 "xoffPauseFramesReceived");
5111 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5112 children, rxstats.macControlFramesReceived,
5113 "ControlFramesReceived");
5114 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5115 children, rxstats.xoffStateEntered, "xoffStateEntered");
5116 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5117 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5118 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5119 children, rxstats.etherStatsJabbers, "Jabbers");
5120 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5121 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5122 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5123 children, rxstats.inRangeLengthError, "inRangeLengthError");
5124 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5125 children, rxstats.outRangeLengthError, "outRangeLengthError");
5126
5127 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5128 NULL, "BGE TX Statistics");
5129 children = SYSCTL_CHILDREN(tree);
5130 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5131 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5132 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5133 children, txstats.etherStatsCollisions, "Collisions");
5134 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5135 children, txstats.outXonSent, "XonSent");
5136 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5137 children, txstats.outXoffSent, "XoffSent");
5138 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5139 children, txstats.flowControlDone, "flowControlDone");
5140 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5141 children, txstats.dot3StatsInternalMacTransmitErrors,
5142 "InternalMacTransmitErrors");
5143 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5144 children, txstats.dot3StatsSingleCollisionFrames,
5145 "SingleCollisionFrames");
5146 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5147 children, txstats.dot3StatsMultipleCollisionFrames,
5148 "MultipleCollisionFrames");
5149 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5150 children, txstats.dot3StatsDeferredTransmissions,
5151 "DeferredTransmissions");
5152 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5153 children, txstats.dot3StatsExcessiveCollisions,
5154 "ExcessiveCollisions");
5155 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5156 children, txstats.dot3StatsLateCollisions,
5157 "LateCollisions");
5158 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5159 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5160 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5161 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5162 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5163 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5164 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5165 children, txstats.dot3StatsCarrierSenseErrors,
5166 "CarrierSenseErrors");
5167 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5168 children, txstats.ifOutDiscards, "Discards");
5169 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5170 children, txstats.ifOutErrors, "Errors");
5171}
5172
5173#undef BGE_SYSCTL_STAT
5174
5175#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5176 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5177
5178static void
5179bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5180 struct sysctl_oid_list *parent)
5181{
5182 struct sysctl_oid *tree;
5183 struct sysctl_oid_list *child, *schild;
5184 struct bge_mac_stats *stats;
5185
5186 stats = &sc->bge_mac_stats;
5187 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5188 NULL, "BGE Statistics");
5189 schild = child = SYSCTL_CHILDREN(tree);
5190 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5191 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5192 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5193 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5194 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5195 &stats->DmaWriteHighPriQueueFull,
5196 "NIC DMA Write High Priority Queue Full");
5197 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5198 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5199 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5200 &stats->InputDiscards, "Discarded Input Frames");
5201 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5202 &stats->InputErrors, "Input Errors");
5203 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5204 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5205
5206 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5207 NULL, "BGE RX Statistics");
5208 child = SYSCTL_CHILDREN(tree);
5209 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5210 &stats->ifHCInOctets, "Inbound Octets");
5211 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5212 &stats->etherStatsFragments, "Fragments");
5213 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5214 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5215 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5216 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5217 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5218 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5219 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5220 &stats->dot3StatsFCSErrors, "FCS Errors");
5221 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5222 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5223 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5224 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5225 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5226 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5227 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5228 &stats->macControlFramesReceived, "MAC Control Frames Received");
5229 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5230 &stats->xoffStateEntered, "XOFF State Entered");
5231 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5232 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5233 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5234 &stats->etherStatsJabbers, "Jabbers");
5235 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5236 &stats->etherStatsUndersizePkts, "Undersized Packets");
5237
5238 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5239 NULL, "BGE TX Statistics");
5240 child = SYSCTL_CHILDREN(tree);
5241 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5242 &stats->ifHCOutOctets, "Outbound Octets");
5243 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5244 &stats->etherStatsCollisions, "TX Collisions");
5245 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5246 &stats->outXonSent, "XON Sent");
5247 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5248 &stats->outXoffSent, "XOFF Sent");
5249 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5250 &stats->dot3StatsInternalMacTransmitErrors,
5251 "Internal MAC TX Errors");
5252 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5253 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5254 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5255 &stats->dot3StatsMultipleCollisionFrames,
5256 "Multiple Collision Frames");
5257 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5258 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5259 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5260 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5261 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5262 &stats->dot3StatsLateCollisions, "Late Collisions");
5263 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5264 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5265 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5266 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5267 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5268 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5269}
5270
5271#undef BGE_SYSCTL_STAT_ADD64
5272
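/*
 * Read a statistics counter directly from the NIC's statistics block
 * through the standard memory window.  arg2 carries the offset of the
 * counter within struct bge_stats; the 32-bit word at the counter's
 * bge_addr_lo offset is what gets reported.
 */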
5273static int
5274bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5275{
5276 struct bge_softc *sc;
5277 uint32_t result;
5278 int offset;
5279
5280 sc = (struct bge_softc *)arg1;
5281 offset = arg2;
5282 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5283 offsetof(bge_hostaddr, bge_addr_lo));
5284 return (sysctl_handle_int(oidp, &result, 0, req));
5285}
5286
5287#ifdef BGE_REGISTER_DEBUG
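/*
 * Debug handler: writing 1 to the dev.bge.<unit>.debug_info sysctl dumps
 * the status block, the device registers at 0x800-0x9FF and the detected
 * hardware flags to the console.
 */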
5288static int
5289bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5290{
5291 struct bge_softc *sc;
5292 uint16_t *sbdata;
5293 int error;
5294 int result;
5295 int i, j;
5296
5297 result = -1;
5298 error = sysctl_handle_int(oidp, &result, 0, req);
5299 if (error || (req->newptr == NULL))
5300 return (error);
5301
5302 if (result == 1) {
5303 sc = (struct bge_softc *)arg1;
5304
5305 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5306 printf("Status Block:\n");
5307 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
5308 printf("%06x:", i);
5309 for (j = 0; j < 8; j++) {
5310 printf(" %04x", sbdata[i]);
5311 i += 4;
5312 }
5313 printf("\n");
5314 }
5315
5316 printf("Registers:\n");
5317 for (i = 0x800; i < 0xA00; ) {
5318 printf("%06x:", i);
5319 for (j = 0; j < 8; j++) {
5320 printf(" %08x", CSR_READ_4(sc, i));
5321 i += 4;
5322 }
5323 printf("\n");
5324 }
5325
5326 printf("Hardware Flags:\n");
5327 if (BGE_IS_5755_PLUS(sc))
5328 printf(" - 5755 Plus\n");
5329 if (BGE_IS_575X_PLUS(sc))
5330 printf(" - 575X Plus\n");
5331 if (BGE_IS_5705_PLUS(sc))
5332 printf(" - 5705 Plus\n");
5333 if (BGE_IS_5714_FAMILY(sc))
5334 printf(" - 5714 Family\n");
5335 if (BGE_IS_5700_FAMILY(sc))
5336 printf(" - 5700 Family\n");
5337 if (sc->bge_flags & BGE_FLAG_JUMBO)
5338 printf(" - Supports Jumbo Frames\n");
5339 if (sc->bge_flags & BGE_FLAG_PCIX)
5340 printf(" - PCI-X Bus\n");
5341 if (sc->bge_flags & BGE_FLAG_PCIE)
5342 printf(" - PCI Express Bus\n");
5343 if (sc->bge_flags & BGE_FLAG_NO_3LED)
5344 printf(" - No 3 LEDs\n");
5345 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5346 printf(" - RX Alignment Bug\n");
5347 }
5348
5349 return (error);
5350}
5351
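/*
 * Debug handler: writing a register offset below 0x8000 to the
 * dev.bge.<unit>.reg_read sysctl prints that register's contents to the
 * console.  The mem_read handler below does the same for NIC-local memory
 * via bge_readmem_ind().
 */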
5352static int
5353bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5354{
5355 struct bge_softc *sc;
5356 int error;
5357 uint16_t result;
5358 uint32_t val;
5359
5360 result = -1;
5361 error = sysctl_handle_int(oidp, &result, 0, req);
5362 if (error || (req->newptr == NULL))
5363 return (error);
5364
5365 if (result < 0x8000) {
5366 sc = (struct bge_softc *)arg1;
5367 val = CSR_READ_4(sc, result);
5368 printf("reg 0x%06X = 0x%08X\n", result, val);
5369 }
5370
5371 return (error);
5372}
5373
5374static int
5375bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5376{
5377 struct bge_softc *sc;
5378 int error;
5379 uint16_t result;
5380 uint32_t val;
5381
5382 result = -1;
5383 error = sysctl_handle_int(oidp, &result, 0, req);
5384 if (error || (req->newptr == NULL))
5385 return (error);
5386
5387 if (result < 0x8000) {
5388 sc = (struct bge_softc *)arg1;
5389 val = bge_readmem_ind(sc, result);
5390 printf("mem 0x%06X = 0x%08X\n", result, val);
5391 }
5392
5393 return (error);
5394}
5395#endif
5396
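/*
 * The bge_get_eaddr_*() helpers below each try one source for the station
 * address and return zero on success; a non-zero return makes
 * bge_get_eaddr() fall through to the next method in its list.
 */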
5397static int
5398bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5399{
5400
5401 if (sc->bge_flags & BGE_FLAG_EADDR)
5402 return (1);
5403
5404#ifdef __sparc64__
5405 OF_getetheraddr(sc->bge_dev, ether_addr);
5406 return (0);
5407#endif
5408 return (1);
5409}
5410
5411static int
5412bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5413{
5414 uint32_t mac_addr;
5415
5416 mac_addr = bge_readmem_ind(sc, 0x0c14);
5417 if ((mac_addr >> 16) == 0x484b) {
5418 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5419 ether_addr[1] = (uint8_t)mac_addr;
5420 mac_addr = bge_readmem_ind(sc, 0x0c18);
5421 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5422 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5423 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5424 ether_addr[5] = (uint8_t)mac_addr;
5425 return (0);
5426 }
5427 return (1);
5428}
5429
5430static int
5431bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5432{
5433 int mac_offset = BGE_EE_MAC_OFFSET;
5434
5435 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5436 mac_offset = BGE_EE_MAC_OFFSET_5906;
5437
5438 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5439 ETHER_ADDR_LEN));
5440}
5441
5442static int
5443bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5444{
5445
5446 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5447 return (1);
5448
5449 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5450 ETHER_ADDR_LEN));
5451}
5452
5453static int
5454bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5455{
5456 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5457 /* NOTE: Order is critical */
5458 bge_get_eaddr_fw,
5459 bge_get_eaddr_mem,
5460 bge_get_eaddr_nvram,
5461 bge_get_eaddr_eeprom,
5462 NULL
5463 };
5464 const bge_eaddr_fcn_t *func;
5465
5466 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5467 if ((*func)(sc, eaddr) == 0)
5468 break;
5469 }
5470 return (*func == NULL ? ENXIO : 0);
5471}
1874 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1875 DELAY(40);
1876
1877 /* Turn on RX data completion state machine */
1878 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1879
1880 /* Turn on RX BD initiator state machine */
1881 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1882
1883 /* Turn on RX data and RX BD initiator state machine */
1884 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1885
1886 /* Turn on Mbuf cluster free state machine */
1887 if (!(BGE_IS_5705_PLUS(sc)))
1888 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1889
1890 /* Turn on send BD completion state machine */
1891 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1892
1893 /* Turn on send data completion state machine */
1894 val = BGE_SDCMODE_ENABLE;
1895 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
1896 val |= BGE_SDCMODE_CDELAY;
1897 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1898
1899 /* Turn on send data initiator state machine */
1900 if (sc->bge_flags & BGE_FLAG_TSO)
1901 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
1902 else
1903 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1904
1905 /* Turn on send BD initiator state machine */
1906 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1907
1908 /* Turn on send BD selector state machine */
1909 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1910
1911 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1912 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1913 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
1914
1915 /* ack/clear link change events */
1916 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1917 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1918 BGE_MACSTAT_LINK_CHANGED);
1919 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1920
1921 /* Enable PHY auto polling (for MII/GMII only) */
1922 if (sc->bge_flags & BGE_FLAG_TBI) {
1923 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1924 } else {
1925 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
1926 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1927 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1928 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1929 BGE_EVTENB_MI_INTERRUPT);
1930 }
1931
1932 /*
1933 * Clear any pending link state attention.
 1934 * Otherwise some link state change events may be lost until the attention
 1935 * is cleared by the bge_intr() -> bge_link_upd() sequence.
 1936 * This is not necessary on newer BCM chips - perhaps enabling link
1937 * state change attentions implies clearing pending attention.
1938 */
1939 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1940 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1941 BGE_MACSTAT_LINK_CHANGED);
1942
1943 /* Enable link state change attentions. */
1944 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1945
1946 return (0);
1947}
1948
1949const struct bge_revision *
1950bge_lookup_rev(uint32_t chipid)
1951{
1952 const struct bge_revision *br;
1953
1954 for (br = bge_revisions; br->br_name != NULL; br++) {
1955 if (br->br_chipid == chipid)
1956 return (br);
1957 }
1958
1959 for (br = bge_majorrevs; br->br_name != NULL; br++) {
1960 if (br->br_chipid == BGE_ASICREV(chipid))
1961 return (br);
1962 }
1963
1964 return (NULL);
1965}
1966
1967const struct bge_vendor *
1968bge_lookup_vendor(uint16_t vid)
1969{
1970 const struct bge_vendor *v;
1971
1972 for (v = bge_vendors; v->v_name != NULL; v++)
1973 if (v->v_id == vid)
1974 return (v);
1975
1976 panic("%s: unknown vendor %d", __func__, vid);
1977 return (NULL);
1978}
1979
1980/*
1981 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1982 * against our list and return its name if we find a match.
1983 *
1984 * Note that since the Broadcom controller contains VPD support, we
1985 * try to get the device name string from the controller itself instead
 1986 * of the compiled-in string. This guarantees we'll always announce the
1987 * right product name. We fall back to the compiled-in string when
1988 * VPD is unavailable or corrupt.
1989 */
1990static int
1991bge_probe(device_t dev)
1992{
1993 const struct bge_type *t = bge_devs;
1994 struct bge_softc *sc = device_get_softc(dev);
1995 uint16_t vid, did;
1996
1997 sc->bge_dev = dev;
1998 vid = pci_get_vendor(dev);
1999 did = pci_get_device(dev);
 2000 while (t->bge_vid != 0) {
2001 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2002 char model[64], buf[96];
2003 const struct bge_revision *br;
2004 const struct bge_vendor *v;
2005 uint32_t id;
2006
2007 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2008 BGE_PCIMISCCTL_ASICREV_SHIFT;
2009 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG)
2010 id = pci_read_config(dev,
2011 BGE_PCI_PRODID_ASICREV, 4);
2012 br = bge_lookup_rev(id);
2013 v = bge_lookup_vendor(vid);
2014 {
2015#if __FreeBSD_version > 700024
2016 const char *pname;
2017
2018 if (bge_has_eaddr(sc) &&
2019 pci_get_vpd_ident(dev, &pname) == 0)
2020 snprintf(model, 64, "%s", pname);
2021 else
2022#endif
2023 snprintf(model, 64, "%s %s",
2024 v->v_name,
2025 br != NULL ? br->br_name :
2026 "NetXtreme Ethernet Controller");
2027 }
2028 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2029 br != NULL ? "" : "unknown ", id);
2030 device_set_desc_copy(dev, buf);
2031 return (0);
2032 }
2033 t++;
2034 }
2035
2036 return (ENXIO);
2037}
2038
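/*
 * Release all DMA resources.  Each map, ring and tag is checked before it
 * is destroyed, so this is safe to call after a partially completed
 * bge_dma_alloc().
 */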
2039static void
2040bge_dma_free(struct bge_softc *sc)
2041{
2042 int i;
2043
2044 /* Destroy DMA maps for RX buffers. */
2045 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2046 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2047 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2048 sc->bge_cdata.bge_rx_std_dmamap[i]);
2049 }
2050 if (sc->bge_cdata.bge_rx_std_sparemap)
2051 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2052 sc->bge_cdata.bge_rx_std_sparemap);
2053
2054 /* Destroy DMA maps for jumbo RX buffers. */
2055 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2056 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2057 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2058 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2059 }
2060 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2061 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2062 sc->bge_cdata.bge_rx_jumbo_sparemap);
2063
2064 /* Destroy DMA maps for TX buffers. */
2065 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2066 if (sc->bge_cdata.bge_tx_dmamap[i])
2067 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2068 sc->bge_cdata.bge_tx_dmamap[i]);
2069 }
2070
2071 if (sc->bge_cdata.bge_rx_mtag)
2072 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2073 if (sc->bge_cdata.bge_tx_mtag)
2074 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
 2075
2077 /* Destroy standard RX ring. */
2078 if (sc->bge_cdata.bge_rx_std_ring_map)
2079 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2080 sc->bge_cdata.bge_rx_std_ring_map);
2081 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2082 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2083 sc->bge_ldata.bge_rx_std_ring,
2084 sc->bge_cdata.bge_rx_std_ring_map);
2085
2086 if (sc->bge_cdata.bge_rx_std_ring_tag)
2087 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2088
2089 /* Destroy jumbo RX ring. */
2090 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2091 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2092 sc->bge_cdata.bge_rx_jumbo_ring_map);
2093
2094 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2095 sc->bge_ldata.bge_rx_jumbo_ring)
2096 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2097 sc->bge_ldata.bge_rx_jumbo_ring,
2098 sc->bge_cdata.bge_rx_jumbo_ring_map);
2099
2100 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2101 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2102
2103 /* Destroy RX return ring. */
2104 if (sc->bge_cdata.bge_rx_return_ring_map)
2105 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2106 sc->bge_cdata.bge_rx_return_ring_map);
2107
2108 if (sc->bge_cdata.bge_rx_return_ring_map &&
2109 sc->bge_ldata.bge_rx_return_ring)
2110 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2111 sc->bge_ldata.bge_rx_return_ring,
2112 sc->bge_cdata.bge_rx_return_ring_map);
2113
2114 if (sc->bge_cdata.bge_rx_return_ring_tag)
2115 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2116
2117 /* Destroy TX ring. */
2118 if (sc->bge_cdata.bge_tx_ring_map)
2119 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2120 sc->bge_cdata.bge_tx_ring_map);
2121
2122 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2123 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2124 sc->bge_ldata.bge_tx_ring,
2125 sc->bge_cdata.bge_tx_ring_map);
2126
2127 if (sc->bge_cdata.bge_tx_ring_tag)
2128 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2129
2130 /* Destroy status block. */
2131 if (sc->bge_cdata.bge_status_map)
2132 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2133 sc->bge_cdata.bge_status_map);
2134
2135 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2136 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2137 sc->bge_ldata.bge_status_block,
2138 sc->bge_cdata.bge_status_map);
2139
2140 if (sc->bge_cdata.bge_status_tag)
2141 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2142
2143 /* Destroy statistics block. */
2144 if (sc->bge_cdata.bge_stats_map)
2145 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2146 sc->bge_cdata.bge_stats_map);
2147
2148 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2149 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2150 sc->bge_ldata.bge_stats,
2151 sc->bge_cdata.bge_stats_map);
2152
2153 if (sc->bge_cdata.bge_stats_tag)
2154 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2155
2156 if (sc->bge_cdata.bge_buffer_tag)
2157 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2158
2159 /* Destroy the parent tag. */
2160 if (sc->bge_cdata.bge_parent_tag)
2161 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2162}
2163
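/*
 * Allocate, clear and load a DMA ring.  If the controller suffers from the
 * 4GB boundary bug and the loaded ring happens to straddle a 4GB boundary,
 * the allocation is torn down and retried with the DMA address space
 * limited to 32 bits.
 */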
2164static int
2165bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2166 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2167 bus_addr_t *paddr, const char *msg)
2168{
2169 struct bge_dmamap_arg ctx;
2170 bus_addr_t lowaddr;
2171 bus_size_t ring_end;
2172 int error;
2173
2174 lowaddr = BUS_SPACE_MAXADDR;
2175again:
2176 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2177 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2178 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2179 if (error != 0) {
2180 device_printf(sc->bge_dev,
2181 "could not create %s dma tag\n", msg);
2182 return (ENOMEM);
2183 }
2184 /* Allocate DMA'able memory for ring. */
2185 error = bus_dmamem_alloc(*tag, (void **)ring,
2186 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2187 if (error != 0) {
2188 device_printf(sc->bge_dev,
2189 "could not allocate DMA'able memory for %s\n", msg);
2190 return (ENOMEM);
2191 }
2192 /* Load the address of the ring. */
2193 ctx.bge_busaddr = 0;
2194 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2195 &ctx, BUS_DMA_NOWAIT);
2196 if (error != 0) {
2197 device_printf(sc->bge_dev,
2198 "could not load DMA'able memory for %s\n", msg);
2199 return (ENOMEM);
2200 }
2201 *paddr = ctx.bge_busaddr;
2202 ring_end = *paddr + maxsize;
2203 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2204 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2205 /*
2206 * 4GB boundary crossed. Limit maximum allowable DMA
2207 * address space to 32bit and try again.
2208 */
2209 bus_dmamap_unload(*tag, *map);
2210 bus_dmamem_free(*tag, *ring, *map);
2211 bus_dma_tag_destroy(*tag);
2212 if (bootverbose)
2213 device_printf(sc->bge_dev, "4GB boundary crossed, "
2214 "limit DMA address space to 32bit for %s\n", msg);
2215 *ring = NULL;
2216 *tag = NULL;
2217 *map = NULL;
2218 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2219 goto again;
2220 }
2221 return (0);
2222}
2223
2224static int
2225bge_dma_alloc(struct bge_softc *sc)
2226{
2227 bus_addr_t lowaddr;
2228 bus_size_t boundary, sbsz, txsegsz, txmaxsegsz;
2229 int i, error;
2230
2231 lowaddr = BUS_SPACE_MAXADDR;
2232 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2233 lowaddr = BGE_DMA_MAXADDR;
2234 /*
2235 * Allocate the parent bus DMA tag appropriate for PCI.
2236 */
2237 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2238 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2239 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2240 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2241 if (error != 0) {
2242 device_printf(sc->bge_dev,
2243 "could not allocate parent dma tag\n");
2244 return (ENOMEM);
2245 }
2246
2247 /* Create tag for standard RX ring. */
2248 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2249 &sc->bge_cdata.bge_rx_std_ring_tag,
2250 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2251 &sc->bge_cdata.bge_rx_std_ring_map,
2252 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2253 if (error)
2254 return (error);
2255
2256 /* Create tag for RX return ring. */
2257 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2258 &sc->bge_cdata.bge_rx_return_ring_tag,
2259 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2260 &sc->bge_cdata.bge_rx_return_ring_map,
2261 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2262 if (error)
2263 return (error);
2264
2265 /* Create tag for TX ring. */
2266 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2267 &sc->bge_cdata.bge_tx_ring_tag,
2268 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2269 &sc->bge_cdata.bge_tx_ring_map,
2270 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2271 if (error)
2272 return (error);
2273
2274 /*
2275 * Create tag for status block.
 2276 * Because we use only a single Tx/Rx/Rx return ring, use the
 2277 * minimum status block size, except on BCM5700 AX/BX which
 2278 * seems to want to see the full status block size regardless
 2279 * of the configured number of rings.
2280 */
2281 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2282 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2283 sbsz = BGE_STATUS_BLK_SZ;
2284 else
2285 sbsz = 32;
2286 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2287 &sc->bge_cdata.bge_status_tag,
2288 (uint8_t **)&sc->bge_ldata.bge_status_block,
2289 &sc->bge_cdata.bge_status_map,
2290 &sc->bge_ldata.bge_status_block_paddr, "status block");
2291 if (error)
2292 return (error);
2293
2294 /* Create tag for statistics block. */
2295 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2296 &sc->bge_cdata.bge_stats_tag,
2297 (uint8_t **)&sc->bge_ldata.bge_stats,
2298 &sc->bge_cdata.bge_stats_map,
2299 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2300 if (error)
2301 return (error);
2302
2303 /* Create tag for jumbo RX ring. */
2304 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2305 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2306 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2307 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2308 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2309 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2310 if (error)
2311 return (error);
2312 }
2313
2314 /* Create parent tag for buffers. */
2315 boundary = 0;
2316 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0)
2317 boundary = BGE_DMA_BNDRY;
2318 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2319 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2320 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2321 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2322 if (error != 0) {
2323 device_printf(sc->bge_dev,
2324 "could not allocate buffer dma tag\n");
2325 return (ENOMEM);
2326 }
2327 /* Create tag for Tx mbufs. */
2328 if (sc->bge_flags & BGE_FLAG_TSO) {
2329 txsegsz = BGE_TSOSEG_SZ;
2330 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2331 } else {
2332 txsegsz = MCLBYTES;
2333 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2334 }
2335 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2336 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2337 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2338 &sc->bge_cdata.bge_tx_mtag);
2339
2340 if (error) {
2341 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2342 return (ENOMEM);
2343 }
2344
2345 /* Create tag for Rx mbufs. */
2346 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2347 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
2348 MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2349
2350 if (error) {
2351 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2352 return (ENOMEM);
2353 }
2354
2355 /* Create DMA maps for RX buffers. */
2356 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2357 &sc->bge_cdata.bge_rx_std_sparemap);
2358 if (error) {
2359 device_printf(sc->bge_dev,
2360 "can't create spare DMA map for RX\n");
2361 return (ENOMEM);
2362 }
2363 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2364 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2365 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2366 if (error) {
2367 device_printf(sc->bge_dev,
2368 "can't create DMA map for RX\n");
2369 return (ENOMEM);
2370 }
2371 }
2372
2373 /* Create DMA maps for TX buffers. */
2374 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2375 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2376 &sc->bge_cdata.bge_tx_dmamap[i]);
2377 if (error) {
2378 device_printf(sc->bge_dev,
2379 "can't create DMA map for TX\n");
2380 return (ENOMEM);
2381 }
2382 }
2383
2384 /* Create tags for jumbo RX buffers. */
2385 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2386 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2387 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2388 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2389 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2390 if (error) {
2391 device_printf(sc->bge_dev,
2392 "could not allocate jumbo dma tag\n");
2393 return (ENOMEM);
2394 }
2395 /* Create DMA maps for jumbo RX buffers. */
2396 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2397 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2398 if (error) {
2399 device_printf(sc->bge_dev,
2400 "can't create spare DMA map for jumbo RX\n");
2401 return (ENOMEM);
2402 }
2403 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2404 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2405 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2406 if (error) {
2407 device_printf(sc->bge_dev,
2408 "can't create DMA map for jumbo RX\n");
2409 return (ENOMEM);
2410 }
2411 }
2412 }
2413
2414 return (0);
2415}
2416
2417/*
2418 * Return true if this device has more than one port.
2419 */
2420static int
2421bge_has_multiple_ports(struct bge_softc *sc)
2422{
2423 device_t dev = sc->bge_dev;
2424 u_int b, d, f, fscan, s;
2425
2426 d = pci_get_domain(dev);
2427 b = pci_get_bus(dev);
2428 s = pci_get_slot(dev);
2429 f = pci_get_function(dev);
2430 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2431 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2432 return (1);
2433 return (0);
2434}
2435
2436/*
2437 * Return true if MSI can be used with this device.
2438 */
2439static int
2440bge_can_use_msi(struct bge_softc *sc)
2441{
2442 int can_use_msi = 0;
2443
2444 switch (sc->bge_asicrev) {
2445 case BGE_ASICREV_BCM5714_A0:
2446 case BGE_ASICREV_BCM5714:
2447 /*
2448 * Apparently, MSI doesn't work when these chips are
2449 * configured in single-port mode.
2450 */
2451 if (bge_has_multiple_ports(sc))
2452 can_use_msi = 1;
2453 break;
2454 case BGE_ASICREV_BCM5750:
2455 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2456 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2457 can_use_msi = 1;
2458 break;
2459 default:
2460 if (BGE_IS_575X_PLUS(sc))
2461 can_use_msi = 1;
2462 }
2463 return (can_use_msi);
2464}
2465
2466static int
2467bge_attach(device_t dev)
2468{
2469 struct ifnet *ifp;
2470 struct bge_softc *sc;
2471 uint32_t hwcfg = 0, misccfg;
2472 u_char eaddr[ETHER_ADDR_LEN];
2473 int error, msicount, reg, rid, trys;
2474
2475 sc = device_get_softc(dev);
2476 sc->bge_dev = dev;
2477
2478 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2479
2480 /*
2481 * Map control/status registers.
2482 */
2483 pci_enable_busmaster(dev);
2484
2485 rid = PCIR_BAR(0);
2486 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2487 RF_ACTIVE);
2488
2489 if (sc->bge_res == NULL) {
2490 device_printf (sc->bge_dev, "couldn't map memory\n");
2491 error = ENXIO;
2492 goto fail;
2493 }
2494
2495 /* Save various chip information. */
2496 sc->bge_chipid =
2497 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2498 BGE_PCIMISCCTL_ASICREV_SHIFT;
2499 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG)
2500 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV,
2501 4);
2502 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2503 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2504
2505 /*
2506 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2507 * 5705 A0 and A1 chips.
2508 */
2509 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2510 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2511 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2512 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
2513 sc->bge_flags |= BGE_FLAG_WIRESPEED;
2514
2515 if (bge_has_eaddr(sc))
2516 sc->bge_flags |= BGE_FLAG_EADDR;
2517
2518 /* Save chipset family. */
2519 switch (sc->bge_asicrev) {
2520 case BGE_ASICREV_BCM5755:
2521 case BGE_ASICREV_BCM5761:
2522 case BGE_ASICREV_BCM5784:
2523 case BGE_ASICREV_BCM5785:
2524 case BGE_ASICREV_BCM5787:
2525 case BGE_ASICREV_BCM57780:
2526 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2527 BGE_FLAG_5705_PLUS;
2528 break;
2529 case BGE_ASICREV_BCM5700:
2530 case BGE_ASICREV_BCM5701:
2531 case BGE_ASICREV_BCM5703:
2532 case BGE_ASICREV_BCM5704:
2533 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2534 break;
2535 case BGE_ASICREV_BCM5714_A0:
2536 case BGE_ASICREV_BCM5780:
2537 case BGE_ASICREV_BCM5714:
2538 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2539 /* FALLTHROUGH */
2540 case BGE_ASICREV_BCM5750:
2541 case BGE_ASICREV_BCM5752:
2542 case BGE_ASICREV_BCM5906:
2543 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2544 /* FALLTHROUGH */
2545 case BGE_ASICREV_BCM5705:
2546 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2547 break;
2548 }
2549
2550 /* Set various bug flags. */
2551 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2552 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2553 sc->bge_flags |= BGE_FLAG_CRC_BUG;
2554 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2555 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2556 sc->bge_flags |= BGE_FLAG_ADC_BUG;
2557 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2558 sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
2559 if (pci_get_subvendor(dev) == DELL_VENDORID)
2560 sc->bge_flags |= BGE_FLAG_NO_3LED;
2561 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2562 sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
2563 if (BGE_IS_5705_PLUS(sc) &&
2564 !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
2565 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2566 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2567 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2568 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2569 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2570 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2571 sc->bge_flags |= BGE_FLAG_JITTER_BUG;
2572 } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
2573 sc->bge_flags |= BGE_FLAG_BER_BUG;
2574 }
2575
2576 /*
 2577 * All controllers that are not 5755 or higher have a 4GB
 2578 * boundary DMA bug.
 2579 * Whenever a DMA address crosses a multiple of the 4GB boundary
 2580 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
 2581 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
 2582 * state machine will lock up and cause the device to hang.
2583 */
2584 if (BGE_IS_5755_PLUS(sc) == 0)
2585 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
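	/*
	 * The workaround is applied in bge_dma_ring_alloc() and through
	 * the buffer DMA tag boundary set up in bge_dma_alloc().
	 */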
2586
2587 /*
2588 * We could possibly check for BCOM_DEVICEID_BCM5788 in bge_probe()
2589 * but I do not know the DEVICEID for the 5788M.
2590 */
2591 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2592 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2593 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2594 sc->bge_flags |= BGE_FLAG_5788;
2595
2596 /*
 2597 * Some controllers seem to require special firmware to use
 2598 * TSO. But the firmware is not available to FreeBSD and Linux
 2599 * claims that the TSO performed by the firmware is slower than
 2600 * hardware based TSO. Moreover the firmware based TSO has a
 2601 * known bug which can't handle TSO if the ethernet header +
 2602 * IP/TCP header is greater than 80 bytes. A workaround for the
 2603 * TSO bug exists, but it seems more expensive than not using
 2604 * TSO at all. Some hardware also has the TSO bug, so limit
 2605 * TSO to the controllers that are not affected by TSO issues
 2606 * (e.g. 5755 or higher).
2607 */
2608 if (BGE_IS_5755_PLUS(sc)) {
2609 /*
 2610 * BCM5754 and BCM5787 share the same ASIC id, so an
 2611 * explicit device id check is required.
 2612 * For an unknown reason, TSO does not work on the BCM5755M.
2613 */
2614 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2615 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2616 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
2617 sc->bge_flags |= BGE_FLAG_TSO;
2618 }
2619
2620 /*
2621 * Check if this is a PCI-X or PCI Express device.
2622 */
2623 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2624 /*
 2625 * Found a PCI Express capabilities register; this
2626 * must be a PCI Express device.
2627 */
2628 sc->bge_flags |= BGE_FLAG_PCIE;
2629 sc->bge_expcap = reg;
2630 if (pci_get_max_read_req(dev) != 4096)
2631 pci_set_max_read_req(dev, 4096);
2632 } else {
2633 /*
2634 * Check if the device is in PCI-X Mode.
2635 * (This bit is not valid on PCI Express controllers.)
2636 */
2637 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
2638 sc->bge_pcixcap = reg;
2639 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2640 BGE_PCISTATE_PCI_BUSMODE) == 0)
2641 sc->bge_flags |= BGE_FLAG_PCIX;
2642 }
2643
2644 /*
2645 * The 40bit DMA bug applies to the 5714/5715 controllers and is
2646 * not actually a MAC controller bug but an issue with the embedded
 2647 * PCIe to PCI-X bridge in the device. Use the 40bit DMA workaround.
2648 */
2649 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2650 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2651 /*
2652 * Allocate the interrupt, using MSI if possible. These devices
2653 * support 8 MSI messages, but only the first one is used in
2654 * normal operation.
2655 */
2656 rid = 0;
2657 if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
2658 sc->bge_msicap = reg;
2659 if (bge_can_use_msi(sc)) {
2660 msicount = pci_msi_count(dev);
2661 if (msicount > 1)
2662 msicount = 1;
2663 } else
2664 msicount = 0;
2665 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2666 rid = 1;
2667 sc->bge_flags |= BGE_FLAG_MSI;
2668 }
2669 }
2670
2671 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2672 RF_SHAREABLE | RF_ACTIVE);
2673
2674 if (sc->bge_irq == NULL) {
2675 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2676 error = ENXIO;
2677 goto fail;
2678 }
2679
2680 device_printf(dev,
2681 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2682 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2683 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2684 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2685
2686 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2687
2688 /* Try to reset the chip. */
2689 if (bge_reset(sc)) {
2690 device_printf(sc->bge_dev, "chip reset failed\n");
2691 error = ENXIO;
2692 goto fail;
2693 }
2694
2695 sc->bge_asf_mode = 0;
2696 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2697 == BGE_MAGIC_NUMBER)) {
2698 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2699 & BGE_HWCFG_ASF) {
2700 sc->bge_asf_mode |= ASF_ENABLE;
2701 sc->bge_asf_mode |= ASF_STACKUP;
2702 if (BGE_IS_575X_PLUS(sc))
2703 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2704 }
2705 }
2706
2707 /* Try to reset the chip again the nice way. */
2708 bge_stop_fw(sc);
2709 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2710 if (bge_reset(sc)) {
2711 device_printf(sc->bge_dev, "chip reset failed\n");
2712 error = ENXIO;
2713 goto fail;
2714 }
2715
2716 bge_sig_legacy(sc, BGE_RESET_STOP);
2717 bge_sig_post_reset(sc, BGE_RESET_STOP);
2718
2719 if (bge_chipinit(sc)) {
2720 device_printf(sc->bge_dev, "chip initialization failed\n");
2721 error = ENXIO;
2722 goto fail;
2723 }
2724
2725 error = bge_get_eaddr(sc, eaddr);
2726 if (error) {
2727 device_printf(sc->bge_dev,
2728 "failed to read station address\n");
2729 error = ENXIO;
2730 goto fail;
2731 }
2732
2733 /* 5705 limits RX return ring to 512 entries. */
2734 if (BGE_IS_5705_PLUS(sc))
2735 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2736 else
2737 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2738
2739 if (bge_dma_alloc(sc)) {
2740 device_printf(sc->bge_dev,
2741 "failed to allocate DMA resources\n");
2742 error = ENXIO;
2743 goto fail;
2744 }
2745
2746 bge_add_sysctls(sc);
2747
2748 /* Set default tuneable values. */
2749 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2750 sc->bge_rx_coal_ticks = 150;
2751 sc->bge_tx_coal_ticks = 150;
2752 sc->bge_rx_max_coal_bds = 10;
2753 sc->bge_tx_max_coal_bds = 10;
2754
2755 /* Initialize checksum features to use. */
2756 sc->bge_csum_features = BGE_CSUM_FEATURES;
2757 if (sc->bge_forced_udpcsum != 0)
2758 sc->bge_csum_features |= CSUM_UDP;
2759
2760 /* Set up ifnet structure */
2761 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2762 if (ifp == NULL) {
2763 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2764 error = ENXIO;
2765 goto fail;
2766 }
2767 ifp->if_softc = sc;
2768 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2769 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2770 ifp->if_ioctl = bge_ioctl;
2771 ifp->if_start = bge_start;
2772 ifp->if_init = bge_init;
2773 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2774 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2775 IFQ_SET_READY(&ifp->if_snd);
2776 ifp->if_hwassist = sc->bge_csum_features;
2777 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2778 IFCAP_VLAN_MTU;
2779 if ((sc->bge_flags & BGE_FLAG_TSO) != 0) {
2780 ifp->if_hwassist |= CSUM_TSO;
2781 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
2782 }
2783#ifdef IFCAP_VLAN_HWCSUM
2784 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2785#endif
2786 ifp->if_capenable = ifp->if_capabilities;
2787#ifdef DEVICE_POLLING
2788 ifp->if_capabilities |= IFCAP_POLLING;
2789#endif
2790
2791 /*
2792 * 5700 B0 chips do not support checksumming correctly due
2793 * to hardware bugs.
2794 */
2795 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2796 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2797 ifp->if_capenable &= ~IFCAP_HWCSUM;
2798 ifp->if_hwassist = 0;
2799 }
2800
2801 /*
2802 * Figure out what sort of media we have by checking the
2803 * hardware config word in the first 32k of NIC internal memory,
2804 * or fall back to examining the EEPROM if necessary.
2805 * Note: on some BCM5700 cards, this value appears to be unset.
2806 * If that's the case, we have to rely on identifying the NIC
2807 * by its PCI subsystem ID, as we do below for the SysKonnect
2808 * SK-9D41.
2809 */
2810 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2811 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2812 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
2813 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2814 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2815 sizeof(hwcfg))) {
2816 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2817 error = ENXIO;
2818 goto fail;
2819 }
2820 hwcfg = ntohl(hwcfg);
2821 }
2822
2823 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2824 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
2825 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2826 if (BGE_IS_5714_FAMILY(sc))
2827 sc->bge_flags |= BGE_FLAG_MII_SERDES;
2828 else
2829 sc->bge_flags |= BGE_FLAG_TBI;
2830 }
2831
2832 if (sc->bge_flags & BGE_FLAG_TBI) {
2833 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2834 bge_ifmedia_sts);
2835 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
2836 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2837 0, NULL);
2838 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
2839 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2840 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2841 } else {
2842 /*
2843 * Do transceiver setup and tell the firmware the
 2844 * driver is down so we can try to get access to the PHY
 2845 * for the probe if ASF is running. Retry a couple of times
2846 * if we get a conflict with the ASF firmware accessing
2847 * the PHY.
2848 */
2849 trys = 0;
2850 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2851again:
2852 bge_asf_driver_up(sc);
2853
2854 if (mii_phy_probe(dev, &sc->bge_miibus,
2855 bge_ifmedia_upd, bge_ifmedia_sts)) {
2856 if (trys++ < 4) {
2857 device_printf(sc->bge_dev, "Try again\n");
2858 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
2859 BMCR_RESET);
2860 goto again;
2861 }
2862
2863 device_printf(sc->bge_dev, "MII without any PHY!\n");
2864 error = ENXIO;
2865 goto fail;
2866 }
2867
2868 /*
2869 * Now tell the firmware we are going up after probing the PHY
2870 */
2871 if (sc->bge_asf_mode & ASF_STACKUP)
2872 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2873 }
2874
2875 /*
2876 * When using the BCM5701 in PCI-X mode, data corruption has
2877 * been observed in the first few bytes of some received packets.
2878 * Aligning the packet buffer in memory eliminates the corruption.
2879 * Unfortunately, this misaligns the packet payloads. On platforms
2880 * which do not support unaligned accesses, we will realign the
2881 * payloads by copying the received packets.
2882 */
2883 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2884 sc->bge_flags & BGE_FLAG_PCIX)
2885 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2886
2887 /*
2888 * Call MI attach routine.
2889 */
2890 ether_ifattach(ifp, eaddr);
2891 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2892
2893 /* Tell upper layer we support long frames. */
2894 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2895
2896 /*
2897 * Hookup IRQ last.
2898 */
2899#if __FreeBSD_version > 700030
2900 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
2901 /* Take advantage of single-shot MSI. */
2902 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
2903 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
2904 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
2905 taskqueue_thread_enqueue, &sc->bge_tq);
2906 if (sc->bge_tq == NULL) {
2907 device_printf(dev, "could not create taskqueue.\n");
2908 ether_ifdetach(ifp);
2909 error = ENXIO;
2910 goto fail;
2911 }
2912 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
2913 device_get_nameunit(sc->bge_dev));
2914 error = bus_setup_intr(dev, sc->bge_irq,
2915 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
2916 &sc->bge_intrhand);
2917 if (error)
2918 ether_ifdetach(ifp);
2919 } else
2920 error = bus_setup_intr(dev, sc->bge_irq,
2921 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
2922 &sc->bge_intrhand);
2923#else
2924 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2925 bge_intr, sc, &sc->bge_intrhand);
2926#endif
2927
2928 if (error) {
2929 bge_detach(dev);
2930 device_printf(sc->bge_dev, "couldn't set up irq\n");
2931 }
2932
2933 return (0);
2934
2935fail:
2936 bge_release_resources(sc);
2937
2938 return (error);
2939}
2940
2941static int
2942bge_detach(device_t dev)
2943{
2944 struct bge_softc *sc;
2945 struct ifnet *ifp;
2946
2947 sc = device_get_softc(dev);
2948 ifp = sc->bge_ifp;
2949
2950#ifdef DEVICE_POLLING
2951 if (ifp->if_capenable & IFCAP_POLLING)
2952 ether_poll_deregister(ifp);
2953#endif
2954
2955 BGE_LOCK(sc);
2956 bge_stop(sc);
2957 bge_reset(sc);
2958 BGE_UNLOCK(sc);
2959
2960 callout_drain(&sc->bge_stat_ch);
2961
2962 if (sc->bge_tq)
2963 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
2964 ether_ifdetach(ifp);
2965
2966 if (sc->bge_flags & BGE_FLAG_TBI) {
2967 ifmedia_removeall(&sc->bge_ifmedia);
2968 } else {
2969 bus_generic_detach(dev);
2970 device_delete_child(dev, sc->bge_miibus);
2971 }
2972
2973 bge_release_resources(sc);
2974
2975 return (0);
2976}
2977
2978static void
2979bge_release_resources(struct bge_softc *sc)
2980{
2981 device_t dev;
2982
2983 dev = sc->bge_dev;
2984
2985 if (sc->bge_tq != NULL)
2986 taskqueue_free(sc->bge_tq);
2987
2988 if (sc->bge_intrhand != NULL)
2989 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2990
2991 if (sc->bge_irq != NULL)
2992 bus_release_resource(dev, SYS_RES_IRQ,
2993 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
2994
2995 if (sc->bge_flags & BGE_FLAG_MSI)
2996 pci_release_msi(dev);
2997
2998 if (sc->bge_res != NULL)
2999 bus_release_resource(dev, SYS_RES_MEMORY,
3000 PCIR_BAR(0), sc->bge_res);
3001
3002 if (sc->bge_ifp != NULL)
3003 if_free(sc->bge_ifp);
3004
3005 bge_dma_free(sc);
3006
3007 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3008 BGE_LOCK_DESTROY(sc);
3009}
3010
3011static int
3012bge_reset(struct bge_softc *sc)
3013{
3014 device_t dev;
3015 uint32_t cachesize, command, pcistate, reset, val;
3016 void (*write_op)(struct bge_softc *, int, int);
3017 uint16_t devctl;
3018 int i;
3019
3020 dev = sc->bge_dev;
3021
3022 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3023 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3024 if (sc->bge_flags & BGE_FLAG_PCIE)
3025 write_op = bge_writemem_direct;
3026 else
3027 write_op = bge_writemem_ind;
3028 } else
3029 write_op = bge_writereg_ind;
3030
3031 /* Save some important PCI state. */
3032 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3033 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3034 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3035
3036 pci_write_config(dev, BGE_PCI_MISC_CTL,
3037 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3038 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3039
3040 /* Disable fastboot on controllers that support it. */
3041 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3042 BGE_IS_5755_PLUS(sc)) {
3043 if (bootverbose)
3044 device_printf(dev, "Disabling fastboot\n");
3045 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3046 }
3047
3048 /*
3049 * Write the magic number to SRAM at offset 0xB50.
3050 * When firmware finishes its initialization it will
3051 * write ~BGE_MAGIC_NUMBER to the same location.
3052 */
3053 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3054
3055 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3056
3057 /* XXX: Broadcom Linux driver. */
3058 if (sc->bge_flags & BGE_FLAG_PCIE) {
3059 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3060 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3061 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3062 /* Prevent PCIE link training during global reset */
3063 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3064 reset |= 1 << 29;
3065 }
3066 }
3067
3068 /*
3069 * Set GPHY Power Down Override to leave GPHY
3070 * powered up in D0 uninitialized.
3071 */
3072 if (BGE_IS_5705_PLUS(sc))
3073 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3074
3075 /* Issue global reset */
3076 write_op(sc, BGE_MISC_CFG, reset);
3077
3078 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3079 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3080 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3081 val | BGE_VCPU_STATUS_DRV_RESET);
3082 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3083 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3084 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3085 }
3086
3087 DELAY(1000);
3088
3089 /* XXX: Broadcom Linux driver. */
3090 if (sc->bge_flags & BGE_FLAG_PCIE) {
3091 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3092 DELAY(500000); /* wait for link training to complete */
3093 val = pci_read_config(dev, 0xC4, 4);
3094 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3095 }
3096 devctl = pci_read_config(dev,
3097 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3098 /* Clear enable no snoop and disable relaxed ordering. */
3099 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3100 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3101 /* Set PCIE max payload size to 128. */
3102 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3103 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3104 devctl, 2);
3105 /* Clear error status. */
3106 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3107 PCIM_EXP_STA_CORRECTABLE_ERROR |
3108 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3109 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3110 }
3111
3112 /* Reset some of the PCI state that got zapped by reset. */
3113 pci_write_config(dev, BGE_PCI_MISC_CTL,
3114 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3115 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3116 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3117 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3118 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3119 /*
 3120 * Disable PCI-X relaxed ordering to ensure the status block update
 3121 * comes first, followed by packet buffer DMA. Otherwise the driver
 3122 * may read a stale status block.
3123 */
3124 if (sc->bge_flags & BGE_FLAG_PCIX) {
3125 devctl = pci_read_config(dev,
3126 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3127 devctl &= ~PCIXM_COMMAND_ERO;
3128 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3129 devctl &= ~PCIXM_COMMAND_MAX_READ;
3130 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3131 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3132 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3133 PCIXM_COMMAND_MAX_READ);
3134 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3135 }
3136 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3137 devctl, 2);
3138 }
3139 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3140 if (BGE_IS_5714_FAMILY(sc)) {
3141 /* This chip disables MSI on reset. */
3142 if (sc->bge_flags & BGE_FLAG_MSI) {
3143 val = pci_read_config(dev,
3144 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3145 pci_write_config(dev,
3146 sc->bge_msicap + PCIR_MSI_CTRL,
3147 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3148 val = CSR_READ_4(sc, BGE_MSI_MODE);
3149 CSR_WRITE_4(sc, BGE_MSI_MODE,
3150 val | BGE_MSIMODE_ENABLE);
3151 }
3152 val = CSR_READ_4(sc, BGE_MARB_MODE);
3153 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3154 } else
3155 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3156
3157 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3158 for (i = 0; i < BGE_TIMEOUT; i++) {
3159 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3160 if (val & BGE_VCPU_STATUS_INIT_DONE)
3161 break;
3162 DELAY(100);
3163 }
3164 if (i == BGE_TIMEOUT) {
3165 device_printf(dev, "reset timed out\n");
3166 return (1);
3167 }
3168 } else {
3169 /*
3170 * Poll until we see the 1's complement of the magic number.
3171 * This indicates that the firmware initialization is complete.
3172 * We expect this to fail if no chip containing the Ethernet
3173 * address is fitted, though.
3174 */
3175 for (i = 0; i < BGE_TIMEOUT; i++) {
3176 DELAY(10);
3177 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3178 if (val == ~BGE_MAGIC_NUMBER)
3179 break;
3180 }
3181
3182 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3183 device_printf(dev,
3184 "firmware handshake timed out, found 0x%08x\n",
3185 val);
3186 }
3187
3188 /*
3189 * XXX Wait for the value of the PCISTATE register to
3190 * return to its original pre-reset state. This is a
3191 * fairly good indicator of reset completion. If we don't
3192 * wait for the reset to fully complete, trying to read
3193 * from the device's non-PCI registers may yield garbage
3194 * results.
3195 */
3196 for (i = 0; i < BGE_TIMEOUT; i++) {
3197 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3198 break;
3199 DELAY(10);
3200 }
3201
3202 /* Fix up byte swapping. */
3203 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3204 BGE_MODECTL_BYTESWAP_DATA);
3205
3206 /* Tell the ASF firmware we are up */
3207 if (sc->bge_asf_mode & ASF_STACKUP)
3208 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3209
3210 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3211
3212 /*
3213 * The 5704 in TBI mode apparently needs some special
3214 * adjustment to ensure the SERDES drive level is set
3215 * to 1.2V.
3216 */
3217 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3218 sc->bge_flags & BGE_FLAG_TBI) {
3219 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3220 val = (val & ~0xFFF) | 0x880;
3221 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3222 }
3223
3224 /* XXX: Broadcom Linux driver. */
3225 if (sc->bge_flags & BGE_FLAG_PCIE &&
3226 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3227 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3228 /* Enable Data FIFO protection. */
3229 val = CSR_READ_4(sc, 0x7C00);
3230 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3231 }
3232 DELAY(10000);
3233
3234 return (0);
3235}
3236
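/*
 * The two helpers below recycle a receive buffer in place: they rebuild
 * the ring entry at the current producer index from the segment lengths
 * saved when the mbuf was originally loaded, so bge_rxeof() can re-post
 * the same mbuf when a frame is bad or a replacement buffer cannot be
 * allocated.
 */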
3237static __inline void
3238bge_rxreuse_std(struct bge_softc *sc, int i)
3239{
3240 struct bge_rx_bd *r;
3241
3242 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3243 r->bge_flags = BGE_RXBDFLAG_END;
3244 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3245 r->bge_idx = i;
3246 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3247}
3248
3249static __inline void
3250bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3251{
3252 struct bge_extrx_bd *r;
3253
3254 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3255 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3256 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3257 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3258 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3259 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3260 r->bge_idx = i;
3261 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3262}
3263
3264/*
3265 * Frame reception handling. This is called if there's a frame
3266 * on the receive return list.
3267 *
3268 * Note: we have to be able to handle two possibilities here:
3269 * 1) the frame is from the jumbo receive ring
3270 * 2) the frame is from the standard receive ring
3271 */
3272
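/*
 * The holdlck argument is nonzero when the caller already holds the
 * driver lock; in that case the lock is dropped around if_input() so
 * the upper stack runs unlocked, and is re-acquired afterwards.
 */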
3273static int
3274bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3275{
3276 struct ifnet *ifp;
3277 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3278 uint16_t rx_cons;
3279
3280 rx_cons = sc->bge_rx_saved_considx;
3281
3282 /* Nothing to do. */
3283 if (rx_cons == rx_prod)
3284 return (rx_npkts);
3285
3286 ifp = sc->bge_ifp;
3287
3288 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3289 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3290 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3291 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3292 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3293 (MCLBYTES - ETHER_ALIGN))
3294 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3295 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3296
3297 while (rx_cons != rx_prod) {
3298 struct bge_rx_bd *cur_rx;
3299 uint32_t rxidx;
3300 struct mbuf *m = NULL;
3301 uint16_t vlan_tag = 0;
3302 int have_tag = 0;
3303
3304#ifdef DEVICE_POLLING
3305 if (ifp->if_capenable & IFCAP_POLLING) {
3306 if (sc->rxcycles <= 0)
3307 break;
3308 sc->rxcycles--;
3309 }
3310#endif
3311
3312 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3313
3314 rxidx = cur_rx->bge_idx;
3315 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3316
3317 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3318 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3319 have_tag = 1;
3320 vlan_tag = cur_rx->bge_vlan_tag;
3321 }
3322
3323 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3324 jumbocnt++;
3325 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3326 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3327 bge_rxreuse_jumbo(sc, rxidx);
3328 continue;
3329 }
3330 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3331 bge_rxreuse_jumbo(sc, rxidx);
3332 ifp->if_iqdrops++;
3333 continue;
3334 }
3335 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3336 } else {
3337 stdcnt++;
3338 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3339 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3340 bge_rxreuse_std(sc, rxidx);
3341 continue;
3342 }
3343 if (bge_newbuf_std(sc, rxidx) != 0) {
3344 bge_rxreuse_std(sc, rxidx);
3345 ifp->if_iqdrops++;
3346 continue;
3347 }
3348 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3349 }
3350
3351 ifp->if_ipackets++;
3352#ifndef __NO_STRICT_ALIGNMENT
3353 /*
3354 * For architectures with strict alignment we must make sure
3355 * the payload is aligned.
3356 */
3357 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3358 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3359 cur_rx->bge_len);
3360 m->m_data += ETHER_ALIGN;
3361 }
3362#endif
3363 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3364 m->m_pkthdr.rcvif = ifp;
3365
3366 if (ifp->if_capenable & IFCAP_RXCSUM) {
3367 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3368 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3369 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3370 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3371 }
3372 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3373 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3374 m->m_pkthdr.csum_data =
3375 cur_rx->bge_tcp_udp_csum;
3376 m->m_pkthdr.csum_flags |=
3377 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
3378 }
3379 }
3380
3381 /*
3382 * If we received a packet with a vlan tag,
3383 * attach that information to the packet.
3384 */
3385 if (have_tag) {
3386#if __FreeBSD_version > 700022
3387 m->m_pkthdr.ether_vtag = vlan_tag;
3388 m->m_flags |= M_VLANTAG;
3389#else
3390 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3391 if (m == NULL)
3392 continue;
3393#endif
3394 }
3395
3396 if (holdlck != 0) {
3397 BGE_UNLOCK(sc);
3398 (*ifp->if_input)(ifp, m);
3399 BGE_LOCK(sc);
3400 } else
3401 (*ifp->if_input)(ifp, m);
3402 rx_npkts++;
3403
3404 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3405 return (rx_npkts);
3406 }
3407
3408 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3409 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3410 if (stdcnt > 0)
3411 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3412 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3413
3414 if (jumbocnt > 0)
3415 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3416 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3417
3418 sc->bge_rx_saved_considx = rx_cons;
3419 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3420 if (stdcnt)
3421 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3422 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3423 if (jumbocnt)
3424 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3425 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3426#ifdef notyet
3427 /*
3428 * This register wraps very quickly under heavy packet drops.
3429 * If you need correct statistics, you can enable this check.
3430 */
3431 if (BGE_IS_5705_PLUS(sc))
3432 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3433#endif
3434 return (rx_npkts);
3435}
3436
3437static void
3438bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3439{
3440 struct bge_tx_bd *cur_tx;
3441 struct ifnet *ifp;
3442
3443 BGE_LOCK_ASSERT(sc);
3444
3445 /* Nothing to do. */
3446 if (sc->bge_tx_saved_considx == tx_cons)
3447 return;
3448
3449 ifp = sc->bge_ifp;
3450
3451 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3452 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3453 /*
3454 * Go through our tx ring and free mbufs for those
3455 * frames that have been sent.
3456 */
3457 while (sc->bge_tx_saved_considx != tx_cons) {
3458 uint32_t idx;
3459
3460 idx = sc->bge_tx_saved_considx;
3461 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3462 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3463 ifp->if_opackets++;
3464 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3465 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3466 sc->bge_cdata.bge_tx_dmamap[idx],
3467 BUS_DMASYNC_POSTWRITE);
3468 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3469 sc->bge_cdata.bge_tx_dmamap[idx]);
3470 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3471 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3472 }
3473 sc->bge_txcnt--;
3474 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3475 }
3476
3477 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3478 if (sc->bge_txcnt == 0)
3479 sc->bge_timer = 0;
3480}
3481
3482#ifdef DEVICE_POLLING
3483static int
3484bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3485{
3486 struct bge_softc *sc = ifp->if_softc;
3487 uint16_t rx_prod, tx_cons;
3488 uint32_t statusword;
3489 int rx_npkts = 0;
3490
3491 BGE_LOCK(sc);
3492 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3493 BGE_UNLOCK(sc);
3494 return (rx_npkts);
3495 }
3496
3497 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3498 sc->bge_cdata.bge_status_map,
3499 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3500 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3501 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3502
3503 statusword = sc->bge_ldata.bge_status_block->bge_status;
3504 sc->bge_ldata.bge_status_block->bge_status = 0;
3505
3506 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3507 sc->bge_cdata.bge_status_map,
3508 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3509
3510 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3511 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3512 sc->bge_link_evt++;
3513
3514 if (cmd == POLL_AND_CHECK_STATUS)
3515 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3516 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3517 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3518 bge_link_upd(sc);
3519
3520 sc->rxcycles = count;
3521 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3522 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3523 BGE_UNLOCK(sc);
3524 return (rx_npkts);
3525 }
3526 bge_txeof(sc, tx_cons);
3527 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3528 bge_start_locked(ifp);
3529
3530 BGE_UNLOCK(sc);
3531 return (rx_npkts);
3532}
3533#endif /* DEVICE_POLLING */
3534
3535static int
3536bge_msi_intr(void *arg)
3537{
3538 struct bge_softc *sc;
3539
3540 sc = (struct bge_softc *)arg;
3541 /*
3542 * This interrupt is not shared, and the controller has already
3543 * disabled further interrupts.
3544 */
3545 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3546 return (FILTER_HANDLED);
3547}
3548
3549static void
3550bge_intr_task(void *arg, int pending)
3551{
3552 struct bge_softc *sc;
3553 struct ifnet *ifp;
3554 uint32_t status;
3555 uint16_t rx_prod, tx_cons;
3556
3557 sc = (struct bge_softc *)arg;
3558 ifp = sc->bge_ifp;
3559
3560 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
3561 return;
3562
3563 /* Get updated status block. */
3564 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3565 sc->bge_cdata.bge_status_map,
3566 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3567
3568 /* Save producer/consumer indexes. */
3569 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3570 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3571 status = sc->bge_ldata.bge_status_block->bge_status;
3572 sc->bge_ldata.bge_status_block->bge_status = 0;
3573 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3574 sc->bge_cdata.bge_status_map,
3575 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3576 /* Let controller work. */
3577 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3578
3579 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0) {
3580 BGE_LOCK(sc);
3581 bge_link_upd(sc);
3582 BGE_UNLOCK(sc);
3583 }
3584 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3585 /* Check RX return ring producer/consumer. */
3586 bge_rxeof(sc, rx_prod, 0);
3587 }
3588 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3589 BGE_LOCK(sc);
3590 /* Check TX ring producer/consumer. */
3591 bge_txeof(sc, tx_cons);
3592 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3593 bge_start_locked(ifp);
3594 BGE_UNLOCK(sc);
3595 }
3596}
3597
3598static void
3599bge_intr(void *xsc)
3600{
3601 struct bge_softc *sc;
3602 struct ifnet *ifp;
3603 uint32_t statusword;
3604 uint16_t rx_prod, tx_cons;
3605
3606 sc = xsc;
3607
3608 BGE_LOCK(sc);
3609
3610 ifp = sc->bge_ifp;
3611
3612#ifdef DEVICE_POLLING
3613 if (ifp->if_capenable & IFCAP_POLLING) {
3614 BGE_UNLOCK(sc);
3615 return;
3616 }
3617#endif
3618
3619 /*
3620 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3621 * disable interrupts by writing nonzero like we used to, since with
3622 * our current organization this just gives complications and
3623 * pessimizations for re-enabling interrupts. We used to have races
3624 * instead of the necessary complications. Disabling interrupts
3625 * would just reduce the chance of a status update while we are
3626 * running (by switching to the interrupt-mode coalescence
3627 * parameters), but this chance is already very low so it is more
3628 * efficient to get another interrupt than prevent it.
3629 *
3630 * We do the ack first to ensure another interrupt if there is a
3631 * status update after the ack. We don't check for the status
3632 * changing later because it is more efficient to get another
3633 * interrupt than prevent it, not quite as above (not checking is
3634 * a smaller optimization than not toggling the interrupt enable,
3635 * since checking doesn't involve PCI accesses and toggling requires
3636 * the status check). So toggling would probably be a pessimization
3637 * even with MSI. It would only be needed for using a task queue.
3638 */
3639 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3640
3641 /*
3642 * Do the mandatory PCI flush as well as get the link status.
3643 */
3644 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3645
3646 /* Make sure the descriptor ring indexes are coherent. */
3647 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3648 sc->bge_cdata.bge_status_map,
3649 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3650 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3651 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3652 sc->bge_ldata.bge_status_block->bge_status = 0;
3653 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3654 sc->bge_cdata.bge_status_map,
3655 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3656
3657 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3658 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3659 statusword || sc->bge_link_evt)
3660 bge_link_upd(sc);
3661
3662 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3663 /* Check RX return ring producer/consumer. */
3664 bge_rxeof(sc, rx_prod, 1);
3665 }
3666
3667 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3668 /* Check TX ring producer/consumer. */
3669 bge_txeof(sc, tx_cons);
3670 }
3671
3672 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3673 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3674 bge_start_locked(ifp);
3675
3676 BGE_UNLOCK(sc);
3677}
3678
3679static void
3680bge_asf_driver_up(struct bge_softc *sc)
3681{
3682 if (sc->bge_asf_mode & ASF_STACKUP) {
3683 /* Send ASF heartbeat approx. every 2s. */
3684 if (sc->bge_asf_count)
3685 sc->bge_asf_count --;
3686 else {
3687 sc->bge_asf_count = 2;
3688 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3689 BGE_FW_DRV_ALIVE);
3690 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3691 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3692 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3693 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3694 }
3695 }
3696}
3697
3698static void
3699bge_tick(void *xsc)
3700{
3701 struct bge_softc *sc = xsc;
3702 struct mii_data *mii = NULL;
3703
3704 BGE_LOCK_ASSERT(sc);
3705
3706 /* Synchronize with possible callout reset/stop. */
3707 if (callout_pending(&sc->bge_stat_ch) ||
3708 !callout_active(&sc->bge_stat_ch))
3709 return;
3710
3711 if (BGE_IS_5705_PLUS(sc))
3712 bge_stats_update_regs(sc);
3713 else
3714 bge_stats_update(sc);
3715
3716 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3717 mii = device_get_softc(sc->bge_miibus);
3718 /*
3719 * Do not touch PHY if we have link up. This could break
3720 * IPMI/ASF mode or produce extra input errors
3721 * (extra errors were reported for bcm5701 & bcm5704).
3722 */
3723 if (!sc->bge_link)
3724 mii_tick(mii);
3725 } else {
3726 /*
3727 * Since auto-polling can't be used in TBI mode, we have to poll
3728 * the link status manually. Here we register a pending link event
3729 * and trigger an interrupt.
3730 */
3731#ifdef DEVICE_POLLING
3732 /* In polling mode we poll link state in bge_poll(). */
3733 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3734#endif
3735 {
3736 sc->bge_link_evt++;
3737 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3738 sc->bge_flags & BGE_FLAG_5788)
3739 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3740 else
3741 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3742 }
3743 }
3744
3745 bge_asf_driver_up(sc);
3746 bge_watchdog(sc);
3747
3748 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3749}
3750
3751static void
3752bge_stats_update_regs(struct bge_softc *sc)
3753{
3754 struct ifnet *ifp;
3755 struct bge_mac_stats *stats;
3756
3757 ifp = sc->bge_ifp;
3758 stats = &sc->bge_mac_stats;
3759
3760 stats->ifHCOutOctets +=
3761 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
3762 stats->etherStatsCollisions +=
3763 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
3764 stats->outXonSent +=
3765 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
3766 stats->outXoffSent +=
3767 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
3768 stats->dot3StatsInternalMacTransmitErrors +=
3769 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
3770 stats->dot3StatsSingleCollisionFrames +=
3771 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
3772 stats->dot3StatsMultipleCollisionFrames +=
3773 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
3774 stats->dot3StatsDeferredTransmissions +=
3775 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
3776 stats->dot3StatsExcessiveCollisions +=
3777 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
3778 stats->dot3StatsLateCollisions +=
3779 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
3780 stats->ifHCOutUcastPkts +=
3781 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
3782 stats->ifHCOutMulticastPkts +=
3783 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
3784 stats->ifHCOutBroadcastPkts +=
3785 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
3786
3787 stats->ifHCInOctets +=
3788 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
3789 stats->etherStatsFragments +=
3790 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
3791 stats->ifHCInUcastPkts +=
3792 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
3793 stats->ifHCInMulticastPkts +=
3794 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
3795 stats->ifHCInBroadcastPkts +=
3796 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
3797 stats->dot3StatsFCSErrors +=
3798 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
3799 stats->dot3StatsAlignmentErrors +=
3800 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
3801 stats->xonPauseFramesReceived +=
3802 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
3803 stats->xoffPauseFramesReceived +=
3804 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
3805 stats->macControlFramesReceived +=
3806 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
3807 stats->xoffStateEntered +=
3808 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
3809 stats->dot3StatsFramesTooLong +=
3810 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
3811 stats->etherStatsJabbers +=
3812 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
3813 stats->etherStatsUndersizePkts +=
3814 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
3815
3816 stats->FramesDroppedDueToFilters +=
3817 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
3818 stats->DmaWriteQueueFull +=
3819 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
3820 stats->DmaWriteHighPriQueueFull +=
3821 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
3822 stats->NoMoreRxBDs +=
3823 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3824 stats->InputDiscards +=
3825 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3826 stats->InputErrors +=
3827 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3828 stats->RecvThresholdHit +=
3829 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
3830
3831 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
3832 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
3833 stats->InputErrors);
3834}
3835
3836static void
3837bge_stats_clear_regs(struct bge_softc *sc)
3838{
3839
3840 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
3841 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
3842 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
3843 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
3844 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
3845 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
3846 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
3847 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
3848 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
3849 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
3850 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
3851 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
3852 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
3853
3854 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
3855 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
3856 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
3857 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
3858 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
3859 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
3860 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
3861 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
3862 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
3863 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
3864 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
3865 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
3866 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
3867 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
3868
3869 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
3870 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
3871 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
3872 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3873 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3874 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3875 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
3876}
3877
3878static void
3879bge_stats_update(struct bge_softc *sc)
3880{
3881 struct ifnet *ifp;
3882 bus_size_t stats;
3883 uint32_t cnt; /* current register value */
3884
3885 ifp = sc->bge_ifp;
3886
3887 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3888
3889#define READ_STAT(sc, stats, stat) \
3890 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3891
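/*
 * Each counter below is a 32-bit value read from the statistics block;
 * the delta against the previously saved copy is computed in unsigned
 * arithmetic, so it remains correct across a counter wrap.
 */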
3892 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3893 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
3894 sc->bge_tx_collisions = cnt;
3895
3896 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3897 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
3898 sc->bge_rx_discards = cnt;
3899
3900 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3901 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
3902 sc->bge_tx_discards = cnt;
3903
3904#undef READ_STAT
3905}
3906
3907/*
3908 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3909 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3910 * but when such padded frames employ the bge IP/TCP checksum offload,
3911 * the hardware checksum assist gives incorrect results (possibly
3912 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3913 * If we pad such runts with zeros, the onboard checksum comes out correct.
3914 */
3915static __inline int
3916bge_cksum_pad(struct mbuf *m)
3917{
3918 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3919 struct mbuf *last;
3920
3921 /* If there's only the packet-header and we can pad there, use it. */
3922 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
3923 M_TRAILINGSPACE(m) >= padlen) {
3924 last = m;
3925 } else {
3926 /*
3927 * Walk packet chain to find last mbuf. We will either
3928 * pad there, or append a new mbuf and pad it.
3929 */
3930 for (last = m; last->m_next != NULL; last = last->m_next);
3931 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
3932 /* Allocate new empty mbuf, pad it. Compact later. */
3933 struct mbuf *n;
3934
3935 MGET(n, M_DONTWAIT, MT_DATA);
3936 if (n == NULL)
3937 return (ENOBUFS);
3938 n->m_len = 0;
3939 last->m_next = n;
3940 last = n;
3941 }
3942 }
3943
3944 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
3945 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3946 last->m_len += padlen;
3947 m->m_pkthdr.len += padlen;
3948
3949 return (0);
3950}
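/*
 * Worked example for the padding above (illustrative numbers only): a
 * frame of 54 bytes is below ETHER_MIN_NOPAD (60 bytes, the minimum
 * Ethernet frame length excluding the CRC), so bge_cksum_pad() appends
 * 60 - 54 = 6 zero bytes before the checksum offload sees the frame.
 */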
3951
3952static struct mbuf *
3953bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss)
3954{
3955 struct ip *ip;
3956 struct tcphdr *tcp;
3957 struct mbuf *n;
3958 uint16_t hlen;
3959 uint32_t poff;
3960
3961 if (M_WRITABLE(m) == 0) {
3962 /* Get a writable copy. */
3963 n = m_dup(m, M_DONTWAIT);
3964 m_freem(m);
3965 if (n == NULL)
3966 return (NULL);
3967 m = n;
3968 }
3969 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
3970 if (m == NULL)
3971 return (NULL);
3972 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
3973 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
3974 m = m_pullup(m, poff + sizeof(struct tcphdr));
3975 if (m == NULL)
3976 return (NULL);
3977 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
3978 m = m_pullup(m, poff + (tcp->th_off << 2));
3979 if (m == NULL)
3980 return (NULL);
3981 /*
3982 * It seems the controller doesn't modify the IP length and TCP pseudo
3983 * checksum. These checksums, computed by the upper stack, should be 0.
3984 */
3985 *mss = m->m_pkthdr.tso_segsz;
3986 ip->ip_sum = 0;
3987 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
3988 /* Clear pseudo checksum computed by TCP stack. */
3989 tcp->th_sum = 0;
3990 /*
3991 * Broadcom controllers use different descriptor formats for
3992 * TSO depending on the ASIC revision. Due to the TSO-capable
3993 * firmware license issue and the lower performance of firmware-based
3994 * TSO, we only support hardware-based TSO, which is available on
3995 * BCM5755 or newer controllers. Hardware-based TSO uses the lower 11
3996 * bits to store the MSS and the upper 5 bits to store the IP/TCP
3997 * header length (including IP/TCP options). The header length is
3998 * expressed in 32-bit units.
3999 */
4000 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4001 *mss |= (hlen << 11);
4002 return (m);
4003}
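/*
 * Illustrative sketch of the MSS encoding above (not compiled into the
 * driver; the packet parameters are hypothetical example values): with
 * a 20 byte IP header, a 32 byte TCP header (20 bytes plus 12 bytes of
 * options) and an MSS of 1448, hlen is (20 + 32) >> 2 == 13 and the
 * value programmed into the descriptor is 1448 | (13 << 11).
 */
#if 0
static uint16_t
bge_tso_mss_pack_example(uint16_t mss, uint16_t iphlen, uint16_t tcphlen)
{

	/* Lower 11 bits carry the MSS, upper 5 bits the header length. */
	return (mss | (((iphlen + tcphlen) >> 2) << 11));
}
#endif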
4004
4005/*
4006 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4007 * pointers to descriptors.
4008 */
4009static int
4010bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4011{
4012 bus_dma_segment_t segs[BGE_NSEG_NEW];
4013 bus_dmamap_t map;
4014 struct bge_tx_bd *d;
4015 struct mbuf *m = *m_head;
4016 uint32_t idx = *txidx;
4017 uint16_t csum_flags, mss, vlan_tag;
4018 int nsegs, i, error;
4019
4020 csum_flags = 0;
4021 mss = 0;
4022 vlan_tag = 0;
4023 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4024 *m_head = m = bge_setup_tso(sc, m, &mss);
4025 if (*m_head == NULL)
4026 return (ENOBUFS);
4027 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4028 BGE_TXBDFLAG_CPU_POST_DMA;
4029 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4030 if (m->m_pkthdr.csum_flags & CSUM_IP)
4031 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4032 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4033 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4034 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4035 (error = bge_cksum_pad(m)) != 0) {
4036 m_freem(m);
4037 *m_head = NULL;
4038 return (error);
4039 }
4040 }
4041 if (m->m_flags & M_LASTFRAG)
4042 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4043 else if (m->m_flags & M_FRAG)
4044 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4045 }
4046
4047 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
4048 sc->bge_forced_collapse > 0 &&
4049 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4050 /*
4051 * Forcibly collapse mbuf chains to overcome a hardware
4052 * limitation which only supports a single outstanding
4053 * DMA read operation.
4054 */
4055 if (sc->bge_forced_collapse == 1)
4056 m = m_defrag(m, M_DONTWAIT);
4057 else
4058 m = m_collapse(m, M_DONTWAIT, sc->bge_forced_collapse);
4059 if (m == NULL)
4060 m = *m_head;
4061 *m_head = m;
4062 }
4063
4064 map = sc->bge_cdata.bge_tx_dmamap[idx];
4065 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4066 &nsegs, BUS_DMA_NOWAIT);
4067 if (error == EFBIG) {
4068 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4069 if (m == NULL) {
4070 m_freem(*m_head);
4071 *m_head = NULL;
4072 return (ENOBUFS);
4073 }
4074 *m_head = m;
4075 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4076 m, segs, &nsegs, BUS_DMA_NOWAIT);
4077 if (error) {
4078 m_freem(m);
4079 *m_head = NULL;
4080 return (error);
4081 }
4082 } else if (error != 0)
4083 return (error);
4084
4085 /* Check if we have enough free send BDs. */
4086 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4087 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4088 return (ENOBUFS);
4089 }
4090
4091 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4092
4093#if __FreeBSD_version > 700022
4094 if (m->m_flags & M_VLANTAG) {
4095 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4096 vlan_tag = m->m_pkthdr.ether_vtag;
4097 }
4098#else
4099 {
4100 struct m_tag *mtag;
4101
4102 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
4103 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4104 vlan_tag = VLAN_TAG_VALUE(mtag);
4105 }
4106 }
4107#endif
4108 for (i = 0; ; i++) {
4109 d = &sc->bge_ldata.bge_tx_ring[idx];
4110 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4111 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4112 d->bge_len = segs[i].ds_len;
4113 d->bge_flags = csum_flags;
4114 d->bge_vlan_tag = vlan_tag;
4115 d->bge_mss = mss;
4116 if (i == nsegs - 1)
4117 break;
4118 BGE_INC(idx, BGE_TX_RING_CNT);
4119 }
4120
4121 /* Mark the last segment as end of packet... */
4122 d->bge_flags |= BGE_TXBDFLAG_END;
4123
4124 /*
4125 * Ensure that the map for this transmission
4126 * is placed at the array index of the last descriptor
4127 * in this chain.
4128 */
4129 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4130 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4131 sc->bge_cdata.bge_tx_chain[idx] = m;
4132 sc->bge_txcnt += nsegs;
4133
4134 BGE_INC(idx, BGE_TX_RING_CNT);
4135 *txidx = idx;
4136
4137 return (0);
4138}
4139
4140/*
4141 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4142 * to the mbuf data regions directly in the transmit descriptors.
4143 */
4144static void
4145bge_start_locked(struct ifnet *ifp)
4146{
4147 struct bge_softc *sc;
4148 struct mbuf *m_head;
4149 uint32_t prodidx;
4150 int count;
4151
4152 sc = ifp->if_softc;
4153 BGE_LOCK_ASSERT(sc);
4154
4155 if (!sc->bge_link ||
4156 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4157 IFF_DRV_RUNNING)
4158 return;
4159
4160 prodidx = sc->bge_tx_prodidx;
4161
4162 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4163 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4164 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4165 break;
4166 }
4167 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4168 if (m_head == NULL)
4169 break;
4170
4171 /*
4172 * XXX
4173 * The code inside the if() block is never reached since we
4174 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4175 * requests to checksum TCP/UDP in a fragmented packet.
4176 *
4177 * XXX
4178 * safety overkill. If this is a fragmented packet chain
4179 * with delayed TCP/UDP checksums, then only encapsulate
4180 * it if we have enough descriptors to handle the entire
4181 * chain at once.
4182 * (paranoia -- may not actually be needed)
4183 */
4184 if (m_head->m_flags & M_FIRSTFRAG &&
4185 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4186 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4187 m_head->m_pkthdr.csum_data + 16) {
4188 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4189 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4190 break;
4191 }
4192 }
4193
4194 /*
4195 * Pack the data into the transmit ring. If we
4196 * don't have room, set the OACTIVE flag and wait
4197 * for the NIC to drain the ring.
4198 */
4199 if (bge_encap(sc, &m_head, &prodidx)) {
4200 if (m_head == NULL)
4201 break;
4202 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4203 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4204 break;
4205 }
4206 ++count;
4207
4208 /*
4209 * If there's a BPF listener, bounce a copy of this frame
4210 * to him.
4211 */
4212#ifdef ETHER_BPF_MTAP
4213 ETHER_BPF_MTAP(ifp, m_head);
4214#else
4215 BPF_MTAP(ifp, m_head);
4216#endif
4217 }
4218
4219 if (count > 0) {
4220 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4221 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4222 /* Transmit. */
4223 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4224 /* 5700 b2 errata */
4225 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4226 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4227
4228 sc->bge_tx_prodidx = prodidx;
4229
4230 /*
4231 * Set a timeout in case the chip goes out to lunch.
4232 */
4233 sc->bge_timer = 5;
4234 }
4235}
4236
4237/*
4238 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4239 * to the mbuf data regions directly in the transmit descriptors.
4240 */
4241static void
4242bge_start(struct ifnet *ifp)
4243{
4244 struct bge_softc *sc;
4245
4246 sc = ifp->if_softc;
4247 BGE_LOCK(sc);
4248 bge_start_locked(ifp);
4249 BGE_UNLOCK(sc);
4250}
4251
4252static void
4253bge_init_locked(struct bge_softc *sc)
4254{
4255 struct ifnet *ifp;
4256 uint16_t *m;
4257
4258 BGE_LOCK_ASSERT(sc);
4259
4260 ifp = sc->bge_ifp;
4261
4262 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4263 return;
4264
4265 /* Cancel pending I/O and flush buffers. */
4266 bge_stop(sc);
4267
4268 bge_stop_fw(sc);
4269 bge_sig_pre_reset(sc, BGE_RESET_START);
4270 bge_reset(sc);
4271 bge_sig_legacy(sc, BGE_RESET_START);
4272 bge_sig_post_reset(sc, BGE_RESET_START);
4273
4274 bge_chipinit(sc);
4275
4276 /*
4277 * Init the various state machines, ring
4278 * control blocks and firmware.
4279 */
4280 if (bge_blockinit(sc)) {
4281 device_printf(sc->bge_dev, "initialization failure\n");
4282 return;
4283 }
4284
4285 ifp = sc->bge_ifp;
4286
4287 /* Specify MTU. */
4288 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4289 ETHER_HDR_LEN + ETHER_CRC_LEN +
4290 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4291
4292 /* Load our MAC address. */
4293 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4294 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4295 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4296
4297 /* Program promiscuous mode. */
4298 bge_setpromisc(sc);
4299
4300 /* Program multicast filter. */
4301 bge_setmulti(sc);
4302
4303 /* Program VLAN tag stripping. */
4304 bge_setvlan(sc);
4305
4306 /* Override UDP checksum offloading. */
4307 if (sc->bge_forced_udpcsum == 0)
4308 sc->bge_csum_features &= ~CSUM_UDP;
4309 else
4310 sc->bge_csum_features |= CSUM_UDP;
4311 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4312 ifp->if_capenable & IFCAP_TXCSUM) {
4313 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4314 ifp->if_hwassist |= sc->bge_csum_features;
4315 }
4316
4317 /* Init RX ring. */
4318 if (bge_init_rx_ring_std(sc) != 0) {
4319 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4320 bge_stop(sc);
4321 return;
4322 }
4323
4324 /*
4325 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4326 * memory to ensure that the chip has in fact read the first
4327 * entry of the ring.
4328 */
4329 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4330 uint32_t v, i;
4331 for (i = 0; i < 10; i++) {
4332 DELAY(20);
4333 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4334 if (v == (MCLBYTES - ETHER_ALIGN))
4335 break;
4336 }
4337 if (i == 10)
4338 device_printf (sc->bge_dev,
4339 "5705 A0 chip failed to load RX ring\n");
4340 }
4341
4342 /* Init jumbo RX ring. */
4343 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4344 (MCLBYTES - ETHER_ALIGN)) {
4345 if (bge_init_rx_ring_jumbo(sc) != 0) {
4346 device_printf(sc->bge_dev,
4347 "no memory for jumbo Rx buffers.\n");
4348 bge_stop(sc);
4349 return;
4350 }
4351 }
4352
4353 /* Init our RX return ring index. */
4354 sc->bge_rx_saved_considx = 0;
4355
4356 /* Init our RX/TX stat counters. */
4357 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4358
4359 /* Init TX ring. */
4360 bge_init_tx_ring(sc);
4361
4362 /* Turn on transmitter. */
4363 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
4364
4365 /* Turn on receiver. */
4366 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4367
4368 /*
4369 * Set the number of good frames to receive after RX MBUF
4370 * Low Watermark has been reached. After the RX MAC receives
4371 * this number of frames, it will drop subsequent incoming
4372 * frames until the MBUF High Watermark is reached.
4373 */
4374 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4375
4376 /* Clear MAC statistics. */
4377 if (BGE_IS_5705_PLUS(sc))
4378 bge_stats_clear_regs(sc);
4379
4380 /* Tell firmware we're alive. */
4381 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4382
4383#ifdef DEVICE_POLLING
4384 /* Disable interrupts if we are polling. */
4385 if (ifp->if_capenable & IFCAP_POLLING) {
4386 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4387 BGE_PCIMISCCTL_MASK_PCI_INTR);
4388 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4389 } else
4390#endif
4391
4392 /* Enable host interrupts. */
4393 {
4394 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4395 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4396 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4397 }
4398
4399 bge_ifmedia_upd_locked(ifp);
4400
4401 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4402 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4403
4404 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4405}
4406
4407static void
4408bge_init(void *xsc)
4409{
4410 struct bge_softc *sc = xsc;
4411
4412 BGE_LOCK(sc);
4413 bge_init_locked(sc);
4414 BGE_UNLOCK(sc);
4415}
4416
4417/*
4418 * Set media options.
4419 */
4420static int
4421bge_ifmedia_upd(struct ifnet *ifp)
4422{
4423 struct bge_softc *sc = ifp->if_softc;
4424 int res;
4425
4426 BGE_LOCK(sc);
4427 res = bge_ifmedia_upd_locked(ifp);
4428 BGE_UNLOCK(sc);
4429
4430 return (res);
4431}
4432
4433static int
4434bge_ifmedia_upd_locked(struct ifnet *ifp)
4435{
4436 struct bge_softc *sc = ifp->if_softc;
4437 struct mii_data *mii;
4438 struct mii_softc *miisc;
4439 struct ifmedia *ifm;
4440
4441 BGE_LOCK_ASSERT(sc);
4442
4443 ifm = &sc->bge_ifmedia;
4444
4445 /* If this is a 1000baseX NIC, enable the TBI port. */
4446 if (sc->bge_flags & BGE_FLAG_TBI) {
4447 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4448 return (EINVAL);
4449 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4450 case IFM_AUTO:
4451 /*
4452 * The BCM5704 ASIC appears to have a special
4453 * mechanism for programming the autoneg
4454 * advertisement registers in TBI mode.
4455 */
4456 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4457 uint32_t sgdig;
4458 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4459 if (sgdig & BGE_SGDIGSTS_DONE) {
4460 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4461 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4462 sgdig |= BGE_SGDIGCFG_AUTO |
4463 BGE_SGDIGCFG_PAUSE_CAP |
4464 BGE_SGDIGCFG_ASYM_PAUSE;
4465 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4466 sgdig | BGE_SGDIGCFG_SEND);
4467 DELAY(5);
4468 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4469 }
4470 }
4471 break;
4472 case IFM_1000_SX:
4473 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4474 BGE_CLRBIT(sc, BGE_MAC_MODE,
4475 BGE_MACMODE_HALF_DUPLEX);
4476 } else {
4477 BGE_SETBIT(sc, BGE_MAC_MODE,
4478 BGE_MACMODE_HALF_DUPLEX);
4479 }
4480 break;
4481 default:
4482 return (EINVAL);
4483 }
4484 return (0);
4485 }
4486
4487 sc->bge_link_evt++;
4488 mii = device_get_softc(sc->bge_miibus);
4489 if (mii->mii_instance)
4490 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4491 mii_phy_reset(miisc);
4492 mii_mediachg(mii);
4493
4494 /*
4495 * Force an interrupt so that we will call bge_link_upd
4496 * if needed and clear any pending link state attention.
4497 * Without this we do not get any further interrupts
4498 * for link state changes and thus will not bring the link UP or
4499 * be able to send in bge_start_locked. The only
4500 * way to get things working was to receive a packet and
4501 * get an RX intr.
4502 * bge_tick should help for fiber cards, and we might not
4503 * need to do this here if BGE_FLAG_TBI is set, but as
4504 * we poll for fiber anyway it should not harm.
4505 */
4506 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4507 sc->bge_flags & BGE_FLAG_5788)
4508 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4509 else
4510 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4511
4512 return (0);
4513}
4514
4515/*
4516 * Report current media status.
4517 */
4518static void
4519bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4520{
4521 struct bge_softc *sc = ifp->if_softc;
4522 struct mii_data *mii;
4523
4524 BGE_LOCK(sc);
4525
4526 if (sc->bge_flags & BGE_FLAG_TBI) {
4527 ifmr->ifm_status = IFM_AVALID;
4528 ifmr->ifm_active = IFM_ETHER;
4529 if (CSR_READ_4(sc, BGE_MAC_STS) &
4530 BGE_MACSTAT_TBI_PCS_SYNCHED)
4531 ifmr->ifm_status |= IFM_ACTIVE;
4532 else {
4533 ifmr->ifm_active |= IFM_NONE;
4534 BGE_UNLOCK(sc);
4535 return;
4536 }
4537 ifmr->ifm_active |= IFM_1000_SX;
4538 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4539 ifmr->ifm_active |= IFM_HDX;
4540 else
4541 ifmr->ifm_active |= IFM_FDX;
4542 BGE_UNLOCK(sc);
4543 return;
4544 }
4545
4546 mii = device_get_softc(sc->bge_miibus);
4547 mii_pollstat(mii);
4548 ifmr->ifm_active = mii->mii_media_active;
4549 ifmr->ifm_status = mii->mii_media_status;
4550
4551 BGE_UNLOCK(sc);
4552}
4553
4554static int
4555bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4556{
4557 struct bge_softc *sc = ifp->if_softc;
4558 struct ifreq *ifr = (struct ifreq *) data;
4559 struct mii_data *mii;
4560 int flags, mask, error = 0;
4561
4562 switch (command) {
4563 case SIOCSIFMTU:
4564 if (ifr->ifr_mtu < ETHERMIN ||
4565 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4566 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4567 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4568 ifr->ifr_mtu > ETHERMTU))
4569 error = EINVAL;
4570 else if (ifp->if_mtu != ifr->ifr_mtu) {
4571 ifp->if_mtu = ifr->ifr_mtu;
4572 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4573 bge_init(sc);
4574 }
4575 break;
4576 case SIOCSIFFLAGS:
4577 BGE_LOCK(sc);
4578 if (ifp->if_flags & IFF_UP) {
4579 /*
4580 * If only the state of the PROMISC flag changed,
4581 * then just use the 'set promisc mode' command
4582 * instead of reinitializing the entire NIC. Doing
4583 * a full re-init means reloading the firmware and
4584 * waiting for it to start up, which may take a
4585 * second or two. Similarly for ALLMULTI.
4586 */
4587 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4588 flags = ifp->if_flags ^ sc->bge_if_flags;
4589 if (flags & IFF_PROMISC)
4590 bge_setpromisc(sc);
4591 if (flags & IFF_ALLMULTI)
4592 bge_setmulti(sc);
4593 } else
4594 bge_init_locked(sc);
4595 } else {
4596 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4597 bge_stop(sc);
4598 }
4599 }
4600 sc->bge_if_flags = ifp->if_flags;
4601 BGE_UNLOCK(sc);
4602 error = 0;
4603 break;
4604 case SIOCADDMULTI:
4605 case SIOCDELMULTI:
4606 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4607 BGE_LOCK(sc);
4608 bge_setmulti(sc);
4609 BGE_UNLOCK(sc);
4610 error = 0;
4611 }
4612 break;
4613 case SIOCSIFMEDIA:
4614 case SIOCGIFMEDIA:
4615 if (sc->bge_flags & BGE_FLAG_TBI) {
4616 error = ifmedia_ioctl(ifp, ifr,
4617 &sc->bge_ifmedia, command);
4618 } else {
4619 mii = device_get_softc(sc->bge_miibus);
4620 error = ifmedia_ioctl(ifp, ifr,
4621 &mii->mii_media, command);
4622 }
4623 break;
4624 case SIOCSIFCAP:
4625 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4626#ifdef DEVICE_POLLING
4627 if (mask & IFCAP_POLLING) {
4628 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4629 error = ether_poll_register(bge_poll, ifp);
4630 if (error)
4631 return (error);
4632 BGE_LOCK(sc);
4633 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4634 BGE_PCIMISCCTL_MASK_PCI_INTR);
4635 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4636 ifp->if_capenable |= IFCAP_POLLING;
4637 BGE_UNLOCK(sc);
4638 } else {
4639 error = ether_poll_deregister(ifp);
4640 /* Enable interrupt even in error case */
4641 BGE_LOCK(sc);
4642 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
4643 BGE_PCIMISCCTL_MASK_PCI_INTR);
4644 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4645 ifp->if_capenable &= ~IFCAP_POLLING;
4646 BGE_UNLOCK(sc);
4647 }
4648 }
4649#endif
4650 if ((mask & IFCAP_TXCSUM) != 0 &&
4651 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
4652 ifp->if_capenable ^= IFCAP_TXCSUM;
4653 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
4654 ifp->if_hwassist |= sc->bge_csum_features;
4655 else
4656 ifp->if_hwassist &= ~sc->bge_csum_features;
4657 }
4658
4659 if ((mask & IFCAP_RXCSUM) != 0 &&
4660 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
4661 ifp->if_capenable ^= IFCAP_RXCSUM;
4662
4663 if ((mask & IFCAP_TSO4) != 0 &&
4664 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
4665 ifp->if_capenable ^= IFCAP_TSO4;
4666 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
4667 ifp->if_hwassist |= CSUM_TSO;
4668 else
4669 ifp->if_hwassist &= ~CSUM_TSO;
4670 }
4671
4672 if (mask & IFCAP_VLAN_MTU) {
4673 ifp->if_capenable ^= IFCAP_VLAN_MTU;
4674 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4675 bge_init(sc);
4676 }
4677
4678 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
4679 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
4680 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4681 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
4682 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
4683 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4684 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
4685 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
4686 BGE_LOCK(sc);
4687 bge_setvlan(sc);
4688 BGE_UNLOCK(sc);
4689 }
4690#ifdef VLAN_CAPABILITIES
4691 VLAN_CAPABILITIES(ifp);
4692#endif
4693 break;
4694 default:
4695 error = ether_ioctl(ifp, command, data);
4696 break;
4697 }
4698
4699 return (error);
4700}
4701
4702static void
4703bge_watchdog(struct bge_softc *sc)
4704{
4705 struct ifnet *ifp;
4706
4707 BGE_LOCK_ASSERT(sc);
4708
4709 if (sc->bge_timer == 0 || --sc->bge_timer)
4710 return;
4711
4712 ifp = sc->bge_ifp;
4713
4714 if_printf(ifp, "watchdog timeout -- resetting\n");
4715
4716 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4717 bge_init_locked(sc);
4718
4719 ifp->if_oerrors++;
4720}
4721
4722/*
4723 * Stop the adapter and free any mbufs allocated to the
4724 * RX and TX lists.
4725 */
4726static void
4727bge_stop(struct bge_softc *sc)
4728{
4729 struct ifnet *ifp;
4730
4731 BGE_LOCK_ASSERT(sc);
4732
4733 ifp = sc->bge_ifp;
4734
4735 callout_stop(&sc->bge_stat_ch);
4736
4737 /* Disable host interrupts. */
4738 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4739 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4740
4741 /*
4742 * Tell firmware we're shutting down.
4743 */
4744 bge_stop_fw(sc);
4745 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4746
4747 /*
4748 * Disable all of the receiver blocks.
4749 */
4750 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4751 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4752 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4753 if (!(BGE_IS_5705_PLUS(sc)))
4754 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4755 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4756 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4757 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4758
4759 /*
4760 * Disable all of the transmit blocks.
4761 */
4762 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4763 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4764 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4765 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4766 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4767 if (!(BGE_IS_5705_PLUS(sc)))
4768 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4769 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4770
4771 /*
4772 * Shut down all of the memory managers and related
4773 * state machines.
4774 */
4775 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4776 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4777 if (!(BGE_IS_5705_PLUS(sc)))
4778 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4779 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4780 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4781 if (!(BGE_IS_5705_PLUS(sc))) {
4782 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4783 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4784 }
4785 /* Update MAC statistics. */
4786 if (BGE_IS_5705_PLUS(sc))
4787 bge_stats_update_regs(sc);
4788
4789 bge_reset(sc);
4790 bge_sig_legacy(sc, BGE_RESET_STOP);
4791 bge_sig_post_reset(sc, BGE_RESET_STOP);
4792
4793 /*
4794 * Keep the ASF firmware running if up.
4795 */
4796 if (sc->bge_asf_mode & ASF_STACKUP)
4797 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4798 else
4799 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4800
4801 /* Free the RX lists. */
4802 bge_free_rx_ring_std(sc);
4803
4804 /* Free jumbo RX list. */
4805 if (BGE_IS_JUMBO_CAPABLE(sc))
4806 bge_free_rx_ring_jumbo(sc);
4807
4808 /* Free TX buffers. */
4809 bge_free_tx_ring(sc);
4810
4811 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4812
4813 /* Clear MAC's link state (PHY may still have link UP). */
4814 if (bootverbose && sc->bge_link)
4815 if_printf(sc->bge_ifp, "link DOWN\n");
4816 sc->bge_link = 0;
4817
4818 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4819}
4820
4821/*
4822 * Stop all chip I/O so that the kernel's probe routines don't
4823 * get confused by errant DMAs when rebooting.
4824 */
4825static int
4826bge_shutdown(device_t dev)
4827{
4828 struct bge_softc *sc;
4829
4830 sc = device_get_softc(dev);
4831 BGE_LOCK(sc);
4832 bge_stop(sc);
4833 bge_reset(sc);
4834 BGE_UNLOCK(sc);
4835
4836 return (0);
4837}
4838
4839static int
4840bge_suspend(device_t dev)
4841{
4842 struct bge_softc *sc;
4843
4844 sc = device_get_softc(dev);
4845 BGE_LOCK(sc);
4846 bge_stop(sc);
4847 BGE_UNLOCK(sc);
4848
4849 return (0);
4850}
4851
4852static int
4853bge_resume(device_t dev)
4854{
4855 struct bge_softc *sc;
4856 struct ifnet *ifp;
4857
4858 sc = device_get_softc(dev);
4859 BGE_LOCK(sc);
4860 ifp = sc->bge_ifp;
4861 if (ifp->if_flags & IFF_UP) {
4862 bge_init_locked(sc);
4863 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4864 bge_start_locked(ifp);
4865 }
4866 BGE_UNLOCK(sc);
4867
4868 return (0);
4869}
4870
4871static void
4872bge_link_upd(struct bge_softc *sc)
4873{
4874 struct mii_data *mii;
4875 uint32_t link, status;
4876
4877 BGE_LOCK_ASSERT(sc);
4878
4879 /* Clear 'pending link event' flag. */
4880 sc->bge_link_evt = 0;
4881
4882 /*
4883 * Process link state changes.
4884 * Grrr. The link status word in the status block does
4885 * not work correctly on the BCM5700 rev AX and BX chips,
4886 * according to all available information. Hence, we have
4887 * to enable MII interrupts in order to properly obtain
4888 * async link changes. Unfortunately, this also means that
4889 * we have to read the MAC status register to detect link
4890 * changes, thereby adding an additional register access to
4891 * the interrupt handler.
4892 *
4893 * XXX: perhaps the link state detection procedure used for
4894 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
4895 */
4896
4897 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4898 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
4899 status = CSR_READ_4(sc, BGE_MAC_STS);
4900 if (status & BGE_MACSTAT_MI_INTERRUPT) {
4901 mii = device_get_softc(sc->bge_miibus);
4902 mii_pollstat(mii);
4903 if (!sc->bge_link &&
4904 mii->mii_media_status & IFM_ACTIVE &&
4905 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4906 sc->bge_link++;
4907 if (bootverbose)
4908 if_printf(sc->bge_ifp, "link UP\n");
4909 } else if (sc->bge_link &&
4910 (!(mii->mii_media_status & IFM_ACTIVE) ||
4911 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4912 sc->bge_link = 0;
4913 if (bootverbose)
4914 if_printf(sc->bge_ifp, "link DOWN\n");
4915 }
4916
4917 /* Clear the interrupt. */
4918 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4919 BGE_EVTENB_MI_INTERRUPT);
4920 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4921 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
4922 BRGPHY_INTRS);
4923 }
4924 return;
4925 }
4926
4927 if (sc->bge_flags & BGE_FLAG_TBI) {
4928 status = CSR_READ_4(sc, BGE_MAC_STS);
4929 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4930 if (!sc->bge_link) {
4931 sc->bge_link++;
4932 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
4933 BGE_CLRBIT(sc, BGE_MAC_MODE,
4934 BGE_MACMODE_TBI_SEND_CFGS);
4935 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4936 if (bootverbose)
4937 if_printf(sc->bge_ifp, "link UP\n");
4938 if_link_state_change(sc->bge_ifp,
4939 LINK_STATE_UP);
4940 }
4941 } else if (sc->bge_link) {
4942 sc->bge_link = 0;
4943 if (bootverbose)
4944 if_printf(sc->bge_ifp, "link DOWN\n");
4945 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
4946 }
4947 } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
4948 /*
4949 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED bit
4950 * in the status word always set. Work around this bug by reading
4951 * the PHY link status directly.
4952 */
4953 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
4954
4955 if (link != sc->bge_link ||
4956 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
4957 mii = device_get_softc(sc->bge_miibus);
4958 mii_pollstat(mii);
4959 if (!sc->bge_link &&
4960 mii->mii_media_status & IFM_ACTIVE &&
4961 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4962 sc->bge_link++;
4963 if (bootverbose)
4964 if_printf(sc->bge_ifp, "link UP\n");
4965 } else if (sc->bge_link &&
4966 (!(mii->mii_media_status & IFM_ACTIVE) ||
4967 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4968 sc->bge_link = 0;
4969 if (bootverbose)
4970 if_printf(sc->bge_ifp, "link DOWN\n");
4971 }
4972 }
4973 } else {
4974 /*
4975 * Discard link events for MII/GMII controllers
4976 * if MI auto-polling is disabled.
4977 */
4978 }
4979
4980 /* Clear the attention. */
4981 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4982 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4983 BGE_MACSTAT_LINK_CHANGED);
4984}
4985
4986static void
4987bge_add_sysctls(struct bge_softc *sc)
4988{
4989 struct sysctl_ctx_list *ctx;
4990 struct sysctl_oid_list *children;
4991 char tn[32];
4992 int unit;
4993
4994 ctx = device_get_sysctl_ctx(sc->bge_dev);
4995 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
4996
4997#ifdef BGE_REGISTER_DEBUG
4998 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
4999 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5000 "Debug Information");
5001
5002 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5003 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5004 "Register Read");
5005
5006 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5007 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5008 "Memory Read");
5009
5010#endif
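
	/*
	 * A note on the debug knobs above (only present when the driver is
	 * built with BGE_REGISTER_DEBUG): they are plain sysctl nodes under
	 * the device tree, so a hypothetical session on unit 0 could look
	 * like this (the register offset is an arbitrary example):
	 *
	 *   # sysctl dev.bge.0.debug_info=1     (dump status block/registers)
	 *   # sysctl dev.bge.0.reg_read=0x400   (dump a single register)
	 *   # sysctl dev.bge.0.mem_read=0xc14   (dump a word of NIC memory)
	 *
	 * reg_read and mem_read only act on offsets below 0x8000.
	 */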
5011
5012 unit = device_get_unit(sc->bge_dev);
5013 /*
5014 * A common design characteristic for many Broadcom client controllers
5015 * is that they only support a single outstanding DMA read operation
5016 * on the PCIe bus. This means that it will take twice as long to fetch
5017 * a TX frame that is split into header and payload buffers as it does
5018 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
 5019	 * these controllers, coalescing buffers to reduce the number of memory
 5020	 * reads is an effective way to reach maximum performance (about 940 Mbps).
 5021	 * Without collapsing TX buffers, the maximum TCP bulk transfer
 5022	 * performance is about 850 Mbps. However, forced mbuf coalescing
 5023	 * consumes a lot of CPU cycles, so leave it off by default.
5024 */
5025 sc->bge_forced_collapse = 0;
5026 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5027 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5028 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5029 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5030 "Number of fragmented TX buffers of a frame allowed before "
5031 "forced collapsing");
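
	/*
	 * Example (hypothetical unit 0): to let a frame use up to two TX
	 * buffer fragments before the driver collapses the mbuf chain, set
	 * the loader tunable or the run-time sysctl:
	 *
	 *   /boot/loader.conf:	dev.bge.0.forced_collapse="2"
	 *   run time:		# sysctl dev.bge.0.forced_collapse=2
	 *
	 * The default of 0 leaves forced collapsing disabled.
	 */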
5032
5033 /*
5034 * It seems all Broadcom controllers have a bug that can generate UDP
5035 * datagrams with checksum value 0 when TX UDP checksum offloading is
 5036	 * enabled. Generating a UDP checksum value of 0 violates RFC 768.
 5037	 * Even though the probability of generating such UDP datagrams is
 5038	 * low, I don't want FreeBSD boxes to inject such datagrams into the
 5039	 * network, so disable UDP checksum offloading by default. Users can
 5040	 * still override this behavior by setting the sysctl variable
5041 * dev.bge.0.forced_udpcsum.
5042 */
5043 sc->bge_forced_udpcsum = 0;
 5044	snprintf(tn, sizeof(tn), "dev.bge.%d.forced_udpcsum", unit);
5045 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5046 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5047 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
 5048	    "Enable UDP checksum offloading even if the controller can "
 5049	    "generate a UDP checksum value of 0");
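
	/*
	 * Example (hypothetical unit 0): a user who wants TX UDP checksum
	 * offloading despite the checksum-0 issue described above can set:
	 *
	 *   # sysctl dev.bge.0.forced_udpcsum=1
	 */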
5050
5051 if (BGE_IS_5705_PLUS(sc))
5052 bge_add_sysctl_stats_regs(sc, ctx, children);
5053 else
5054 bge_add_sysctl_stats(sc, ctx, children);
5055}
5056
5057#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5058 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5059 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5060 desc)
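
/*
 * BGE_SYSCTL_STAT creates a read-only CTLTYPE_UINT node and passes the
 * offset of "node" within struct bge_stats as arg2.  bge_sysctl_stats()
 * below adds that offset to BGE_MEMWIN_START + BGE_STATS_BLOCK and reads
 * the bge_addr_lo word of the counter straight from the chip's statistics
 * block, so each sysctl read fetches a fresh value from the hardware.
 */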
5061
5062static void
5063bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5064 struct sysctl_oid_list *parent)
5065{
5066 struct sysctl_oid *tree;
5067 struct sysctl_oid_list *children, *schildren;
5068
5069 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5070 NULL, "BGE Statistics");
5071 schildren = children = SYSCTL_CHILDREN(tree);
5072 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5073 children, COSFramesDroppedDueToFilters,
5074 "FramesDroppedDueToFilters");
5075 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5076 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5077 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5078 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5079 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5080 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5081 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5082 children, ifInDiscards, "InputDiscards");
5083 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5084 children, ifInErrors, "InputErrors");
5085 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5086 children, nicRecvThresholdHit, "RecvThresholdHit");
5087 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5088 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5089 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5090 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5091 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5092 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5093 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5094 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5095 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5096 children, nicRingStatusUpdate, "RingStatusUpdate");
5097 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5098 children, nicInterrupts, "Interrupts");
5099 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5100 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5101 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5102 children, nicSendThresholdHit, "SendThresholdHit");
5103
5104 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5105 NULL, "BGE RX Statistics");
5106 children = SYSCTL_CHILDREN(tree);
5107 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5108 children, rxstats.ifHCInOctets, "ifHCInOctets");
5109 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5110 children, rxstats.etherStatsFragments, "Fragments");
5111 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5112 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5113 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5114 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5115 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5116 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5117 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5118 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5119 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5120 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5121 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5122 children, rxstats.xoffPauseFramesReceived,
5123 "xoffPauseFramesReceived");
5124 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5125 children, rxstats.macControlFramesReceived,
5126 "ControlFramesReceived");
5127 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5128 children, rxstats.xoffStateEntered, "xoffStateEntered");
5129 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5130 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5131 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5132 children, rxstats.etherStatsJabbers, "Jabbers");
5133 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5134 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5135 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5136 children, rxstats.inRangeLengthError, "inRangeLengthError");
5137 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5138 children, rxstats.outRangeLengthError, "outRangeLengthError");
5139
5140 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5141 NULL, "BGE TX Statistics");
5142 children = SYSCTL_CHILDREN(tree);
5143 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5144 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5145 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5146 children, txstats.etherStatsCollisions, "Collisions");
5147 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5148 children, txstats.outXonSent, "XonSent");
5149 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5150 children, txstats.outXoffSent, "XoffSent");
5151 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5152 children, txstats.flowControlDone, "flowControlDone");
5153 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5154 children, txstats.dot3StatsInternalMacTransmitErrors,
5155 "InternalMacTransmitErrors");
5156 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5157 children, txstats.dot3StatsSingleCollisionFrames,
5158 "SingleCollisionFrames");
5159 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5160 children, txstats.dot3StatsMultipleCollisionFrames,
5161 "MultipleCollisionFrames");
5162 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5163 children, txstats.dot3StatsDeferredTransmissions,
5164 "DeferredTransmissions");
5165 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5166 children, txstats.dot3StatsExcessiveCollisions,
5167 "ExcessiveCollisions");
5168 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5169 children, txstats.dot3StatsLateCollisions,
5170 "LateCollisions");
5171 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5172 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5173 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5174 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5175 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5176 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5177 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5178 children, txstats.dot3StatsCarrierSenseErrors,
5179 "CarrierSenseErrors");
5180 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5181 children, txstats.ifOutDiscards, "Discards");
5182 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5183 children, txstats.ifOutErrors, "Errors");
5184}
5185
5186#undef BGE_SYSCTL_STAT
5187
5188#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5189 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
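
/*
 * Unlike BGE_SYSCTL_STAT above, BGE_SYSCTL_STAT_ADD64 exports a 64-bit
 * counter that lives in host memory (sc->bge_mac_stats) through
 * SYSCTL_ADD_QUAD; reading the node just returns the current value of the
 * variable, which the driver updates elsewhere.
 */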
5190
5191static void
5192bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5193 struct sysctl_oid_list *parent)
5194{
5195 struct sysctl_oid *tree;
5196 struct sysctl_oid_list *child, *schild;
5197 struct bge_mac_stats *stats;
5198
5199 stats = &sc->bge_mac_stats;
5200 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5201 NULL, "BGE Statistics");
5202 schild = child = SYSCTL_CHILDREN(tree);
5203 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5204 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5205 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5206 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5207 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5208 &stats->DmaWriteHighPriQueueFull,
5209 "NIC DMA Write High Priority Queue Full");
5210 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5211 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5212 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5213 &stats->InputDiscards, "Discarded Input Frames");
5214 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5215 &stats->InputErrors, "Input Errors");
5216 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5217 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5218
5219 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5220 NULL, "BGE RX Statistics");
5221 child = SYSCTL_CHILDREN(tree);
5222 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5223 &stats->ifHCInOctets, "Inbound Octets");
5224 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5225 &stats->etherStatsFragments, "Fragments");
5226 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5227 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5228 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5229 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5230 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5231 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5232 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5233 &stats->dot3StatsFCSErrors, "FCS Errors");
5234 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5235 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5236 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5237 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5238 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5239 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5240 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5241 &stats->macControlFramesReceived, "MAC Control Frames Received");
5242 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5243 &stats->xoffStateEntered, "XOFF State Entered");
5244 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5245 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5246 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5247 &stats->etherStatsJabbers, "Jabbers");
5248 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5249 &stats->etherStatsUndersizePkts, "Undersized Packets");
5250
5251 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5252 NULL, "BGE TX Statistics");
5253 child = SYSCTL_CHILDREN(tree);
5254 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5255 &stats->ifHCOutOctets, "Outbound Octets");
5256 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5257 &stats->etherStatsCollisions, "TX Collisions");
5258 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5259 &stats->outXonSent, "XON Sent");
5260 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5261 &stats->outXoffSent, "XOFF Sent");
5262 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5263 &stats->dot3StatsInternalMacTransmitErrors,
5264 "Internal MAC TX Errors");
5265 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5266 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5267 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5268 &stats->dot3StatsMultipleCollisionFrames,
5269 "Multiple Collision Frames");
5270 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5271 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5272 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5273 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5274 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5275 &stats->dot3StatsLateCollisions, "Late Collisions");
5276 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5277 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5278 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5279 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5280 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5281 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5282}
5283
5284#undef BGE_SYSCTL_STAT_ADD64
5285
5286static int
5287bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5288{
5289 struct bge_softc *sc;
5290 uint32_t result;
5291 int offset;
5292
5293 sc = (struct bge_softc *)arg1;
5294 offset = arg2;
5295 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5296 offsetof(bge_hostaddr, bge_addr_lo));
5297 return (sysctl_handle_int(oidp, &result, 0, req));
5298}
5299
5300#ifdef BGE_REGISTER_DEBUG
5301static int
5302bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5303{
5304 struct bge_softc *sc;
5305 uint16_t *sbdata;
5306 int error;
5307 int result;
5308 int i, j;
5309
5310 result = -1;
5311 error = sysctl_handle_int(oidp, &result, 0, req);
5312 if (error || (req->newptr == NULL))
5313 return (error);
5314
5315 if (result == 1) {
5316 sc = (struct bge_softc *)arg1;
5317
5318 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5319 printf("Status Block:\n");
5320 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
5321 printf("%06x:", i);
5322 for (j = 0; j < 8; j++) {
5323 printf(" %04x", sbdata[i]);
5324 i += 4;
5325 }
5326 printf("\n");
5327 }
5328
5329 printf("Registers:\n");
5330 for (i = 0x800; i < 0xA00; ) {
5331 printf("%06x:", i);
5332 for (j = 0; j < 8; j++) {
5333 printf(" %08x", CSR_READ_4(sc, i));
5334 i += 4;
5335 }
5336 printf("\n");
5337 }
5338
5339 printf("Hardware Flags:\n");
5340 if (BGE_IS_5755_PLUS(sc))
5341 printf(" - 5755 Plus\n");
5342 if (BGE_IS_575X_PLUS(sc))
5343 printf(" - 575X Plus\n");
5344 if (BGE_IS_5705_PLUS(sc))
5345 printf(" - 5705 Plus\n");
5346 if (BGE_IS_5714_FAMILY(sc))
5347 printf(" - 5714 Family\n");
5348 if (BGE_IS_5700_FAMILY(sc))
5349 printf(" - 5700 Family\n");
5350 if (sc->bge_flags & BGE_FLAG_JUMBO)
5351 printf(" - Supports Jumbo Frames\n");
5352 if (sc->bge_flags & BGE_FLAG_PCIX)
5353 printf(" - PCI-X Bus\n");
5354 if (sc->bge_flags & BGE_FLAG_PCIE)
5355 printf(" - PCI Express Bus\n");
5356 if (sc->bge_flags & BGE_FLAG_NO_3LED)
5357 printf(" - No 3 LEDs\n");
5358 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5359 printf(" - RX Alignment Bug\n");
5360 }
5361
5362 return (error);
5363}
5364
5365static int
5366bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5367{
5368 struct bge_softc *sc;
5369 int error;
 5370	int result;
5371 uint32_t val;
5372
5373 result = -1;
5374 error = sysctl_handle_int(oidp, &result, 0, req);
5375 if (error || (req->newptr == NULL))
5376 return (error);
5377
 5378	if (result >= 0 && result < 0x8000) {
5379 sc = (struct bge_softc *)arg1;
5380 val = CSR_READ_4(sc, result);
5381 printf("reg 0x%06X = 0x%08X\n", result, val);
5382 }
5383
5384 return (error);
5385}
5386
5387static int
5388bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5389{
5390 struct bge_softc *sc;
5391 int error;
 5392	int result;
5393 uint32_t val;
5394
5395 result = -1;
5396 error = sysctl_handle_int(oidp, &result, 0, req);
5397 if (error || (req->newptr == NULL))
5398 return (error);
5399
 5400	if (result >= 0 && result < 0x8000) {
5401 sc = (struct bge_softc *)arg1;
5402 val = bge_readmem_ind(sc, result);
5403 printf("mem 0x%06X = 0x%08X\n", result, val);
5404 }
5405
5406 return (error);
5407}
5408#endif
5409
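/*
 * Station address lookup methods, tried in the order used by
 * bge_get_eaddr() below.  This first method only succeeds on sparc64,
 * where the address can be taken from Open Firmware when BGE_FLAG_EADDR
 * is not set; everywhere else it defers to the next method.
 */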
5410static int
5411bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5412{
5413
5414 if (sc->bge_flags & BGE_FLAG_EADDR)
5415 return (1);
5416
5417#ifdef __sparc64__
5418 OF_getetheraddr(sc->bge_dev, ether_addr);
5419 return (0);
5420#endif
5421 return (1);
5422}
5423
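/*
 * Look for a station address in the controller's internal memory at
 * 0x0c14/0x0c18 (presumably placed there by the device firmware).  The
 * upper half of the first word must contain the 0x484b marker before the
 * six address bytes are trusted; otherwise the next method is tried.
 */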
5424static int
5425bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5426{
5427 uint32_t mac_addr;
5428
5429 mac_addr = bge_readmem_ind(sc, 0x0c14);
5430 if ((mac_addr >> 16) == 0x484b) {
5431 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5432 ether_addr[1] = (uint8_t)mac_addr;
5433 mac_addr = bge_readmem_ind(sc, 0x0c18);
5434 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5435 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5436 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5437 ether_addr[5] = (uint8_t)mac_addr;
5438 return (0);
5439 }
5440 return (1);
5441}
5442
5443static int
5444bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5445{
5446 int mac_offset = BGE_EE_MAC_OFFSET;
5447
5448 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5449 mac_offset = BGE_EE_MAC_OFFSET_5906;
5450
5451 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5452 ETHER_ADDR_LEN));
5453}
5454
5455static int
5456bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5457{
5458
5459 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5460 return (1);
5461
5462 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5463 ETHER_ADDR_LEN));
5464}
5465
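/*
 * Try each address-extraction method in order (firmware/Open Firmware,
 * internal memory, NVRAM, EEPROM) and stop at the first one that returns
 * 0.  If every method fails, return ENXIO.
 */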
5466static int
5467bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5468{
5469 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5470 /* NOTE: Order is critical */
5471 bge_get_eaddr_fw,
5472 bge_get_eaddr_mem,
5473 bge_get_eaddr_nvram,
5474 bge_get_eaddr_eeprom,
5475 NULL
5476 };
5477 const bge_eaddr_fcn_t *func;
5478
5479 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5480 if ((*func)(sc, eaddr) == 0)
5481 break;
5482 }
5483 return (*func == NULL ? ENXIO : 0);
5484}