if_bge.c (190194) → if_bge.c (190319)
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 190194 2009-03-21 00:23:07Z marius $");
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 190319 2009-03-23 14:36:50Z marius $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
 62 * Without external SSRAM, you can have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83
84#include <net/if.h>
85#include <net/if_arp.h>
86#include <net/ethernet.h>
87#include <net/if_dl.h>
88#include <net/if_media.h>
89
90#include <net/bpf.h>
91
92#include <net/if_types.h>
93#include <net/if_vlan_var.h>
94
95#include <netinet/in_systm.h>
96#include <netinet/in.h>
97#include <netinet/ip.h>
98
99#include <machine/bus.h>
100#include <machine/resource.h>
101#include <sys/bus.h>
102#include <sys/rman.h>
103
104#include <dev/mii/mii.h>
105#include <dev/mii/miivar.h>
106#include "miidevs.h"
107#include <dev/mii/brgphyreg.h>
108
109#ifdef __sparc64__
110#include <dev/ofw/ofw_bus.h>
111#include <dev/ofw/openfirm.h>
112#include <machine/ofw_machdep.h>
113#include <machine/ver.h>
114#endif
115
116#include <dev/pci/pcireg.h>
117#include <dev/pci/pcivar.h>
118
119#include <dev/bge/if_bgereg.h>
120
121#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
122#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
123
124MODULE_DEPEND(bge, pci, 1, 1, 1);
125MODULE_DEPEND(bge, ether, 1, 1, 1);
126MODULE_DEPEND(bge, miibus, 1, 1, 1);
127
128/* "device miibus" required. See GENERIC if you get errors here. */
129#include "miibus_if.h"
130
131/*
132 * Various supported device vendors/types and their names. Note: the
133 * spec seems to indicate that the hardware still has Alteon's vendor
 134 * ID burned into it, though it will always be overridden by the vendor
135 * ID in the EEPROM. Just to be safe, we cover all possibilities.
136 */
137static const struct bge_type {
138 uint16_t bge_vid;
139 uint16_t bge_did;
140} bge_devs[] = {
141 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
142 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
143
144 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
145 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
147
148 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
149
150 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
201
202 { SK_VENDORID, SK_DEVICEID_ALTIMA },
203
204 { TC_VENDORID, TC_DEVICEID_3C996 },
205
206 { 0, 0 }
207};
208
209static const struct bge_vendor {
210 uint16_t v_id;
211 const char *v_name;
212} bge_vendors[] = {
213 { ALTEON_VENDORID, "Alteon" },
214 { ALTIMA_VENDORID, "Altima" },
215 { APPLE_VENDORID, "Apple" },
216 { BCOM_VENDORID, "Broadcom" },
217 { SK_VENDORID, "SysKonnect" },
218 { TC_VENDORID, "3Com" },
219
220 { 0, NULL }
221};
222
223static const struct bge_revision {
224 uint32_t br_chipid;
225 const char *br_name;
226} bge_revisions[] = {
227 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
228 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
229 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
230 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
231 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
232 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
233 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
234 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
235 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
236 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
237 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
238 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
239 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
240 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
241 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
242 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
243 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
244 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
245 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
246 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
247 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
248 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
249 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
250 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
251 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
252 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
253 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
254 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
255 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
256 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
257 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
258 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
259 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
260 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
261 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
262 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
263 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
264 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
265 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
266 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
267 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
268 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
269 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
270 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
271 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
272 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
273 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
274 /* 5754 and 5787 share the same ASIC ID */
275 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
276 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
277 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
278 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
279 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
280
281 { 0, NULL }
282};
283
284/*
285 * Some defaults for major revisions, so that newer steppings
286 * that we don't know about have a shot at working.
287 */
288static const struct bge_revision bge_majorrevs[] = {
289 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
290 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
291 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
292 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
293 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
294 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
295 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
296 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
297 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
298 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
299 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
300 /* 5754 and 5787 share the same ASIC ID */
301 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
302 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
303
304 { 0, NULL }
305};
306
307#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
308#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
309#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
310#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
311#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
312
313const struct bge_revision * bge_lookup_rev(uint32_t);
314const struct bge_vendor * bge_lookup_vendor(uint16_t);
315
316typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
317
318static int bge_probe(device_t);
319static int bge_attach(device_t);
320static int bge_detach(device_t);
321static int bge_suspend(device_t);
322static int bge_resume(device_t);
323static void bge_release_resources(struct bge_softc *);
324static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
325static int bge_dma_alloc(device_t);
326static void bge_dma_free(struct bge_softc *);
327
328static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
329static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
330static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
331static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
332static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
333
334static void bge_txeof(struct bge_softc *);
335static void bge_rxeof(struct bge_softc *);
336
 337static void bge_asf_driver_up(struct bge_softc *);
338static void bge_tick(void *);
339static void bge_stats_update(struct bge_softc *);
340static void bge_stats_update_regs(struct bge_softc *);
341static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
342
343static void bge_intr(void *);
344static void bge_start_locked(struct ifnet *);
345static void bge_start(struct ifnet *);
346static int bge_ioctl(struct ifnet *, u_long, caddr_t);
347static void bge_init_locked(struct bge_softc *);
348static void bge_init(void *);
349static void bge_stop(struct bge_softc *);
350static void bge_watchdog(struct bge_softc *);
351static int bge_shutdown(device_t);
352static int bge_ifmedia_upd_locked(struct ifnet *);
353static int bge_ifmedia_upd(struct ifnet *);
354static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
355
356static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
357static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
358
359static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
360static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
361
362static void bge_setpromisc(struct bge_softc *);
363static void bge_setmulti(struct bge_softc *);
364static void bge_setvlan(struct bge_softc *);
365
366static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
367static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
368static int bge_init_rx_ring_std(struct bge_softc *);
369static void bge_free_rx_ring_std(struct bge_softc *);
370static int bge_init_rx_ring_jumbo(struct bge_softc *);
371static void bge_free_rx_ring_jumbo(struct bge_softc *);
372static void bge_free_tx_ring(struct bge_softc *);
373static int bge_init_tx_ring(struct bge_softc *);
374
375static int bge_chipinit(struct bge_softc *);
376static int bge_blockinit(struct bge_softc *);
377
378static int bge_has_eaddr(struct bge_softc *);
379static uint32_t bge_readmem_ind(struct bge_softc *, int);
380static void bge_writemem_ind(struct bge_softc *, int, int);
381static void bge_writembx(struct bge_softc *, int, int);
382#ifdef notdef
383static uint32_t bge_readreg_ind(struct bge_softc *, int);
384#endif
385static void bge_writemem_direct(struct bge_softc *, int, int);
386static void bge_writereg_ind(struct bge_softc *, int, int);
387static void bge_set_max_readrq(struct bge_softc *, int);
388
389static int bge_miibus_readreg(device_t, int, int);
390static int bge_miibus_writereg(device_t, int, int, int);
391static void bge_miibus_statchg(device_t);
392#ifdef DEVICE_POLLING
393static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
394#endif
395
396#define BGE_RESET_START 1
397#define BGE_RESET_STOP 2
398static void bge_sig_post_reset(struct bge_softc *, int);
399static void bge_sig_legacy(struct bge_softc *, int);
400static void bge_sig_pre_reset(struct bge_softc *, int);
401static int bge_reset(struct bge_softc *);
402static void bge_link_upd(struct bge_softc *);
403
404/*
405 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
406 * leak information to untrusted users. It is also known to cause alignment
407 * traps on certain architectures.
408 */
409#ifdef BGE_REGISTER_DEBUG
410static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
411static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
412static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
413#endif
414static void bge_add_sysctls(struct bge_softc *);
415static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
416
417static device_method_t bge_methods[] = {
418 /* Device interface */
419 DEVMETHOD(device_probe, bge_probe),
420 DEVMETHOD(device_attach, bge_attach),
421 DEVMETHOD(device_detach, bge_detach),
422 DEVMETHOD(device_shutdown, bge_shutdown),
423 DEVMETHOD(device_suspend, bge_suspend),
424 DEVMETHOD(device_resume, bge_resume),
425
426 /* bus interface */
427 DEVMETHOD(bus_print_child, bus_generic_print_child),
428 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
429
430 /* MII interface */
431 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
432 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
433 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
434
435 { 0, 0 }
436};
437
438static driver_t bge_driver = {
439 "bge",
440 bge_methods,
441 sizeof(struct bge_softc)
442};
443
444static devclass_t bge_devclass;
445
446DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
447DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
448
449static int bge_allow_asf = 1;
450
451TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
452
453SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
454SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
455 "Allow ASF mode if available");
456
457#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
458#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
459#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
460#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
461#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
462
463static int
464bge_has_eaddr(struct bge_softc *sc)
465{
466#ifdef __sparc64__
467 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
468 device_t dev;
469 uint32_t subvendor;
470
471 dev = sc->bge_dev;
472
473 /*
474 * The on-board BGEs found in sun4u machines aren't fitted with
475 * an EEPROM which means that we have to obtain the MAC address
476 * via OFW and that some tests will always fail. We distinguish
477 * such BGEs by the subvendor ID, which also has to be obtained
478 * from OFW instead of the PCI configuration space as the latter
479 * indicates Broadcom as the subvendor of the netboot interface.
480 * For early Blade 1500 and 2500 we even have to check the OFW
481 * device path as the subvendor ID always defaults to Broadcom
482 * there.
483 */
484 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
485 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
486 subvendor == SUN_VENDORID)
487 return (0);
488 memset(buf, 0, sizeof(buf));
489 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
490 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
491 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
492 return (0);
493 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
494 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
495 return (0);
496 }
497#endif
498 return (1);
499}
500
501static uint32_t
502bge_readmem_ind(struct bge_softc *sc, int off)
503{
504 device_t dev;
505 uint32_t val;
506
507 dev = sc->bge_dev;
508
509 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
510 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
511 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
512 return (val);
513}
514
515static void
516bge_writemem_ind(struct bge_softc *sc, int off, int val)
517{
518 device_t dev;
519
520 dev = sc->bge_dev;
521
522 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
523 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
524 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
525}
526
527/*
528 * PCI Express only
529 */
530static void
531bge_set_max_readrq(struct bge_softc *sc, int expr_ptr)
532{
533 device_t dev;
534 uint16_t val;
535
536 KASSERT((sc->bge_flags & BGE_FLAG_PCIE) && expr_ptr != 0,
537 ("%s: not applicable", __func__));
538
539 dev = sc->bge_dev;
540
541 val = pci_read_config(dev, expr_ptr + BGE_PCIE_DEVCTL, 2);
542 if ((val & BGE_PCIE_DEVCTL_MAX_READRQ_MASK) !=
543 BGE_PCIE_DEVCTL_MAX_READRQ_4096) {
544 if (bootverbose)
545 device_printf(dev, "adjust device control 0x%04x ",
546 val);
547 val &= ~BGE_PCIE_DEVCTL_MAX_READRQ_MASK;
548 val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
549 pci_write_config(dev, expr_ptr + BGE_PCIE_DEVCTL, val, 2);
550 if (bootverbose)
551 printf("-> 0x%04x\n", val);
552 }
553}
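/*
 * Illustrative sketch, not part of the driver: the read-modify-write
 * above follows the standard PCIe Device Control register layout, in
 * which the maximum read request size occupies bits 14:12 and 4096
 * bytes is encoded as 5.  Assuming that encoding, the update is
 * equivalent to:
 *
 *	val = pci_read_config(dev, expr_ptr + BGE_PCIE_DEVCTL, 2);
 *	val = (val & ~(7 << 12)) | (5 << 12);
 *	pci_write_config(dev, expr_ptr + BGE_PCIE_DEVCTL, val, 2);
 */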
554
555#ifdef notdef
556static uint32_t
557bge_readreg_ind(struct bge_softc *sc, int off)
558{
559 device_t dev;
560
561 dev = sc->bge_dev;
562
563 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
564 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
565}
566#endif
567
568static void
569bge_writereg_ind(struct bge_softc *sc, int off, int val)
570{
571 device_t dev;
572
573 dev = sc->bge_dev;
574
575 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
576 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
577}
578
579static void
580bge_writemem_direct(struct bge_softc *sc, int off, int val)
581{
582 CSR_WRITE_4(sc, off, val);
583}
584
585static void
586bge_writembx(struct bge_softc *sc, int off, int val)
587{
588 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
589 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
590
591 CSR_WRITE_4(sc, off, val);
592}
593
594/*
595 * Map a single buffer address.
596 */
597
598static void
599bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
600{
601 struct bge_dmamap_arg *ctx;
602
603 if (error)
604 return;
605
606 ctx = arg;
607
608 if (nseg > ctx->bge_maxsegs) {
609 ctx->bge_maxsegs = 0;
610 return;
611 }
612
613 ctx->bge_busaddr = segs->ds_addr;
614}
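/*
 * Minimal usage sketch, mirroring bge_newbuf_std() below: a caller
 * hands a struct bge_dmamap_arg to bus_dmamap_load(), which invokes
 * bge_dma_map_addr() once the mapping has been resolved:
 *
 *	struct bge_dmamap_arg ctx;
 *	int error;
 *
 *	ctx.sc = sc;
 *	ctx.bge_maxsegs = 1;
 *	error = bus_dmamap_load(tag, map, mtod(m, void *), m->m_len,
 *	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 *	if (error == 0 && ctx.bge_maxsegs != 0)
 *		busaddr = ctx.bge_busaddr;
 *
 * On success, ctx.bge_busaddr holds the single-segment bus address.
 */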
615
616static uint8_t
617bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
618{
619 uint32_t access, byte = 0;
620 int i;
621
622 /* Lock. */
623 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
624 for (i = 0; i < 8000; i++) {
625 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
626 break;
627 DELAY(20);
628 }
629 if (i == 8000)
630 return (1);
631
632 /* Enable access. */
633 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
634 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
635
636 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
637 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
638 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
639 DELAY(10);
640 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
641 DELAY(10);
642 break;
643 }
644 }
645
646 if (i == BGE_TIMEOUT * 10) {
647 if_printf(sc->bge_ifp, "nvram read timed out\n");
648 return (1);
649 }
650
651 /* Get result. */
652 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
653
654 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
655
656 /* Disable access. */
657 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
658
659 /* Unlock. */
660 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
661 CSR_READ_4(sc, BGE_NVRAM_SWARB);
662
663 return (0);
664}
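/*
 * Worked example (illustrative): a read of NVRAM address 5 fetches
 * the aligned 32-bit word at address 4.  With addr % 4 == 1, the
 * expression (bswap32(byte) >> 8) & 0xFF above isolates the second
 * byte of that word; the bswap32() apparently compensates for the
 * controller returning the word byte-swapped relative to the NVRAM
 * byte stream.
 */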
665
666/*
667 * Read a sequence of bytes from NVRAM.
668 */
669static int
670bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
671{
672 int err = 0, i;
673 uint8_t byte = 0;
674
675 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
676 return (1);
677
678 for (i = 0; i < cnt; i++) {
679 err = bge_nvram_getbyte(sc, off + i, &byte);
680 if (err)
681 break;
682 *(dest + i) = byte;
683 }
684
685 return (err ? 1 : 0);
686}
687
688/*
689 * Read a byte of data stored in the EEPROM at address 'addr.' The
690 * BCM570x supports both the traditional bitbang interface and an
691 * auto access interface for reading the EEPROM. We use the auto
692 * access method.
693 */
694static uint8_t
695bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
696{
697 int i;
698 uint32_t byte = 0;
699
700 /*
701 * Enable use of auto EEPROM access so we can avoid
702 * having to use the bitbang method.
703 */
704 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
705
706 /* Reset the EEPROM, load the clock period. */
707 CSR_WRITE_4(sc, BGE_EE_ADDR,
708 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
709 DELAY(20);
710
711 /* Issue the read EEPROM command. */
712 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
713
714 /* Wait for completion */
 715 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
716 DELAY(10);
717 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
718 break;
719 }
720
721 if (i == BGE_TIMEOUT * 10) {
722 device_printf(sc->bge_dev, "EEPROM read timed out\n");
723 return (1);
724 }
725
726 /* Get result. */
727 byte = CSR_READ_4(sc, BGE_EE_DATA);
728
729 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
730
731 return (0);
732}
733
734/*
735 * Read a sequence of bytes from the EEPROM.
736 */
737static int
738bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
739{
740 int i, error = 0;
741 uint8_t byte = 0;
742
743 for (i = 0; i < cnt; i++) {
744 error = bge_eeprom_getbyte(sc, off + i, &byte);
745 if (error)
746 break;
747 *(dest + i) = byte;
748 }
749
750 return (error ? 1 : 0);
751}
752
753static int
754bge_miibus_readreg(device_t dev, int phy, int reg)
755{
756 struct bge_softc *sc;
757 uint32_t val, autopoll;
758 int i;
759
760 sc = device_get_softc(dev);
761
762 /*
763 * Broadcom's own driver always assumes the internal
764 * PHY is at GMII address 1. On some chips, the PHY responds
765 * to accesses at all addresses, which could cause us to
 766 * bogusly attach the PHY 32 times at probe time. Always
767 * restricting the lookup to address 1 is simpler than
 768 * trying to figure out which chip revisions should be
769 * special-cased.
770 */
771 if (phy != 1)
772 return (0);
773
774 /* Reading with autopolling on may trigger PCI errors */
775 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
776 if (autopoll & BGE_MIMODE_AUTOPOLL) {
777 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
778 DELAY(40);
779 }
780
781 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
782 BGE_MIPHY(phy) | BGE_MIREG(reg));
783
784 for (i = 0; i < BGE_TIMEOUT; i++) {
785 DELAY(10);
786 val = CSR_READ_4(sc, BGE_MI_COMM);
787 if (!(val & BGE_MICOMM_BUSY))
788 break;
789 }
790
791 if (i == BGE_TIMEOUT) {
792 device_printf(sc->bge_dev,
793 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
794 phy, reg, val);
795 val = 0;
796 goto done;
797 }
798
799 DELAY(5);
800 val = CSR_READ_4(sc, BGE_MI_COMM);
801
802done:
803 if (autopoll & BGE_MIMODE_AUTOPOLL) {
804 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
805 DELAY(40);
806 }
807
808 if (val & BGE_MICOMM_READFAIL)
809 return (0);
810
811 return (val & 0xFFFF);
812}
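/*
 * Illustrative note: the BGE_MI_COMM write above assembles a
 * clause-22 MII management transaction from the macros in
 * if_bgereg.h, e.g. to read the status register of PHY 1:
 *
 *	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
 *	    BGE_MIPHY(1) | BGE_MIREG(MII_BMSR));
 *
 * The chip signals completion by clearing BGE_MICOMM_BUSY and leaves
 * the register value in the low 16 bits.
 */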
813
814static int
815bge_miibus_writereg(device_t dev, int phy, int reg, int val)
816{
817 struct bge_softc *sc;
818 uint32_t autopoll;
819 int i;
820
821 sc = device_get_softc(dev);
822
823 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
824 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
 825 return (0);
826
827 /* Reading with autopolling on may trigger PCI errors */
828 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
829 if (autopoll & BGE_MIMODE_AUTOPOLL) {
830 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
831 DELAY(40);
832 }
833
834 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
835 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
836
837 for (i = 0; i < BGE_TIMEOUT; i++) {
838 DELAY(10);
839 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
840 DELAY(5);
841 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
842 break;
843 }
844 }
845
846 if (i == BGE_TIMEOUT) {
847 device_printf(sc->bge_dev,
848 "PHY write timed out (phy %d, reg %d, val %d)\n",
849 phy, reg, val);
850 return (0);
851 }
852
853 if (autopoll & BGE_MIMODE_AUTOPOLL) {
854 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
855 DELAY(40);
856 }
857
858 return (0);
859}
860
861static void
862bge_miibus_statchg(device_t dev)
863{
864 struct bge_softc *sc;
865 struct mii_data *mii;
866 sc = device_get_softc(dev);
867 mii = device_get_softc(sc->bge_miibus);
868
869 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
870 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
871 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
872 else
873 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
874
875 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
876 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
877 else
878 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
879}
880
881/*
 882 * Initialize a standard receive ring descriptor.
883 */
884static int
885bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
886{
887 struct mbuf *m_new = NULL;
888 struct bge_rx_bd *r;
889 struct bge_dmamap_arg ctx;
890 int error;
891
892 if (m == NULL) {
893 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
894 if (m_new == NULL)
895 return (ENOBUFS);
896 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
897 } else {
898 m_new = m;
899 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
900 m_new->m_data = m_new->m_ext.ext_buf;
901 }
902
903 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
904 m_adj(m_new, ETHER_ALIGN);
905 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
906 r = &sc->bge_ldata.bge_rx_std_ring[i];
907 ctx.bge_maxsegs = 1;
908 ctx.sc = sc;
909 error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
910 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
911 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
912 if (error || ctx.bge_maxsegs == 0) {
913 if (m == NULL) {
914 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
915 m_freem(m_new);
916 }
917 return (ENOMEM);
918 }
919 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
920 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
921 r->bge_flags = BGE_RXBDFLAG_END;
922 r->bge_len = m_new->m_len;
923 r->bge_idx = i;
924
925 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
926 sc->bge_cdata.bge_rx_std_dmamap[i],
927 BUS_DMASYNC_PREREAD);
928
929 return (0);
930}
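/*
 * Note on the m_adj() above (informational sketch): ETHER_ALIGN is 2,
 * so offsetting the payload by two bytes makes the 14-byte Ethernet
 * header end on a 4-byte boundary, leaving the IP header 32-bit
 * aligned.  Chips flagged with BGE_FLAG_RX_ALIGNBUG reportedly
 * mishandle receive DMA at such offsets, so the adjustment is skipped
 * for them.
 */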
931
932/*
933 * Initialize a jumbo receive ring descriptor. This allocates
934 * a jumbo buffer from the pool managed internally by the driver.
935 */
936static int
937bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
938{
939 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
940 struct bge_extrx_bd *r;
941 struct mbuf *m_new = NULL;
942 int nsegs;
943 int error;
944
945 if (m == NULL) {
946 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
947 if (m_new == NULL)
948 return (ENOBUFS);
949
950 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
951 if (!(m_new->m_flags & M_EXT)) {
952 m_freem(m_new);
953 return (ENOBUFS);
954 }
955 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
956 } else {
957 m_new = m;
958 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
959 m_new->m_data = m_new->m_ext.ext_buf;
960 }
961
962 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
963 m_adj(m_new, ETHER_ALIGN);
964
965 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
966 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
967 m_new, segs, &nsegs, BUS_DMA_NOWAIT);
968 if (error) {
969 if (m == NULL)
970 m_freem(m_new);
971 return (error);
972 }
973 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
974
975 /*
976 * Fill in the extended RX buffer descriptor.
977 */
978 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
979 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
980 r->bge_idx = i;
981 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
982 switch (nsegs) {
983 case 4:
984 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
985 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
986 r->bge_len3 = segs[3].ds_len;
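		/* FALLTHROUGH */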
987 case 3:
988 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
989 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
990 r->bge_len2 = segs[2].ds_len;
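		/* FALLTHROUGH */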
991 case 2:
992 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
993 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
994 r->bge_len1 = segs[1].ds_len;
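		/* FALLTHROUGH */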
995 case 1:
996 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
997 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
998 r->bge_len0 = segs[0].ds_len;
999 break;
1000 default:
1001 panic("%s: %d segments\n", __func__, nsegs);
1002 }
1003
 1004 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1005 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1006 BUS_DMASYNC_PREREAD);
1007
1008 return (0);
1009}
1010
1011/*
1012 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 1013 * that's 1MB of memory, which is a lot. For now, we fill only the first
1014 * 256 ring entries and hope that our CPU is fast enough to keep up with
1015 * the NIC.
1016 */
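/*
 * Worked arithmetic (sketch; assumes BGE_STD_RX_RING_CNT == 512 and
 * BGE_SSLOTS == 256 as defined in if_bgereg.h):
 *
 *	512 slots * 2048-byte clusters = 1MB pinned for a full ring
 *	256 slots * 2048-byte clusters = 512KB for the partial fill
 */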
1017static int
1018bge_init_rx_ring_std(struct bge_softc *sc)
1019{
1020 int i;
1021
1022 for (i = 0; i < BGE_SSLOTS; i++) {
1023 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
1024 return (ENOBUFS);
 1025 }
1026
1027 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1028 sc->bge_cdata.bge_rx_std_ring_map,
1029 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1030
1031 sc->bge_std = i - 1;
1032 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1033
1034 return (0);
1035}
1036
1037static void
1038bge_free_rx_ring_std(struct bge_softc *sc)
1039{
1040 int i;
1041
1042 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1043 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1044 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
1045 sc->bge_cdata.bge_rx_std_dmamap[i],
1046 BUS_DMASYNC_POSTREAD);
1047 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1048 sc->bge_cdata.bge_rx_std_dmamap[i]);
1049 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1050 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1051 }
1052 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1053 sizeof(struct bge_rx_bd));
1054 }
1055}
1056
1057static int
1058bge_init_rx_ring_jumbo(struct bge_softc *sc)
1059{
1060 struct bge_rcb *rcb;
1061 int i;
1062
1063 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1064 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1065 return (ENOBUFS);
 1066 }
1067
1068 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1069 sc->bge_cdata.bge_rx_jumbo_ring_map,
1070 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1071
1072 sc->bge_jumbo = i - 1;
1073
1074 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1075 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1076 BGE_RCB_FLAG_USE_EXT_RX_BD);
1077 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1078
1079 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1080
1081 return (0);
1082}
1083
1084static void
1085bge_free_rx_ring_jumbo(struct bge_softc *sc)
1086{
1087 int i;
1088
1089 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1090 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1091 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1092 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1093 BUS_DMASYNC_POSTREAD);
1094 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1095 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1096 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1097 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1098 }
1099 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1100 sizeof(struct bge_extrx_bd));
1101 }
1102}
1103
1104static void
1105bge_free_tx_ring(struct bge_softc *sc)
1106{
1107 int i;
1108
1109 if (sc->bge_ldata.bge_tx_ring == NULL)
1110 return;
1111
1112 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1113 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1114 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
1115 sc->bge_cdata.bge_tx_dmamap[i],
1116 BUS_DMASYNC_POSTWRITE);
1117 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1118 sc->bge_cdata.bge_tx_dmamap[i]);
1119 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1120 sc->bge_cdata.bge_tx_chain[i] = NULL;
1121 }
1122 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1123 sizeof(struct bge_tx_bd));
1124 }
1125}
1126
1127static int
1128bge_init_tx_ring(struct bge_softc *sc)
1129{
1130 sc->bge_txcnt = 0;
1131 sc->bge_tx_saved_considx = 0;
1132
1133 /* Initialize transmit producer index for host-memory send ring. */
1134 sc->bge_tx_prodidx = 0;
1135 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1136
1137 /* 5700 b2 errata */
1138 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1139 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1140
1141 /* NIC-memory send ring not used; initialize to zero. */
1142 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1143 /* 5700 b2 errata */
1144 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1145 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1146
1147 return (0);
1148}
1149
1150static void
1151bge_setpromisc(struct bge_softc *sc)
1152{
1153 struct ifnet *ifp;
1154
1155 BGE_LOCK_ASSERT(sc);
1156
1157 ifp = sc->bge_ifp;
1158
1159 /* Enable or disable promiscuous mode as needed. */
1160 if (ifp->if_flags & IFF_PROMISC)
1161 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1162 else
1163 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1164}
1165
1166static void
1167bge_setmulti(struct bge_softc *sc)
1168{
1169 struct ifnet *ifp;
1170 struct ifmultiaddr *ifma;
1171 uint32_t hashes[4] = { 0, 0, 0, 0 };
1172 int h, i;
1173
1174 BGE_LOCK_ASSERT(sc);
1175
1176 ifp = sc->bge_ifp;
1177
1178 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1179 for (i = 0; i < 4; i++)
1180 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1181 return;
1182 }
1183
1184 /* First, zot all the existing filters. */
1185 for (i = 0; i < 4; i++)
1186 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1187
1188 /* Now program new ones. */
1189 IF_ADDR_LOCK(ifp);
1190 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1191 if (ifma->ifma_addr->sa_family != AF_LINK)
1192 continue;
1193 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1194 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1195 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1196 }
1197 IF_ADDR_UNLOCK(ifp);
1198
1199 for (i = 0; i < 4; i++)
1200 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1201}
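/*
 * Worked example (illustrative): ether_crc32_le() over the six-byte
 * multicast address yields a 32-bit CRC whose low seven bits select
 * one of 128 filter bits.  For h == 0x6a, the bit lands in word
 * (0x6a & 0x60) >> 5 == 3 at position 0x6a & 0x1f == 10, i.e.
 * hashes[3] |= 1 << 10.
 */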
1202
1203static void
1204bge_setvlan(struct bge_softc *sc)
1205{
1206 struct ifnet *ifp;
1207
1208 BGE_LOCK_ASSERT(sc);
1209
1210 ifp = sc->bge_ifp;
1211
1212 /* Enable or disable VLAN tag stripping as needed. */
1213 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1214 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1215 else
1216 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1217}
1218
1219static void
1220bge_sig_pre_reset(struct bge_softc *sc, int type)
1223{
1224 /*
 1225 * Some chips don't like this, so only do it if ASF is enabled.
1226 */
1227 if (sc->bge_asf_mode)
1228 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1229
1230 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1231 switch (type) {
1232 case BGE_RESET_START:
1233 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1234 break;
1235 case BGE_RESET_STOP:
1236 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1237 break;
1238 }
1239 }
1240}
1241
1242static void
1243bge_sig_post_reset(struct bge_softc *sc, int type)
1246{
1247 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1248 switch (type) {
1249 case BGE_RESET_START:
1250 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1251 /* START DONE */
1252 break;
1253 case BGE_RESET_STOP:
1254 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1255 break;
1256 }
1257 }
1258}
1259
1260static void
1261bge_sig_legacy(struct bge_softc *sc, int type)
1264{
1265 if (sc->bge_asf_mode) {
1266 switch (type) {
1267 case BGE_RESET_START:
1268 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1269 break;
1270 case BGE_RESET_STOP:
1271 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1272 break;
1273 }
1274 }
1275}
1276
1277void bge_stop_fw(struct bge_softc *);
1278void
1279bge_stop_fw(struct bge_softc *sc)
1281{
1282 int i;
1283
1284 if (sc->bge_asf_mode) {
1285 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1286 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1287 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1288
 1289 for (i = 0; i < 100; i++) {
1290 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1291 break;
1292 DELAY(10);
1293 }
1294 }
1295}
1296
1297/*
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83
84#include <net/if.h>
85#include <net/if_arp.h>
86#include <net/ethernet.h>
87#include <net/if_dl.h>
88#include <net/if_media.h>
89
90#include <net/bpf.h>
91
92#include <net/if_types.h>
93#include <net/if_vlan_var.h>
94
95#include <netinet/in_systm.h>
96#include <netinet/in.h>
97#include <netinet/ip.h>
98
99#include <machine/bus.h>
100#include <machine/resource.h>
101#include <sys/bus.h>
102#include <sys/rman.h>
103
104#include <dev/mii/mii.h>
105#include <dev/mii/miivar.h>
106#include "miidevs.h"
107#include <dev/mii/brgphyreg.h>
108
109#ifdef __sparc64__
110#include <dev/ofw/ofw_bus.h>
111#include <dev/ofw/openfirm.h>
112#include <machine/ofw_machdep.h>
113#include <machine/ver.h>
114#endif
115
116#include <dev/pci/pcireg.h>
117#include <dev/pci/pcivar.h>
118
119#include <dev/bge/if_bgereg.h>
120
121#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
122#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
123
124MODULE_DEPEND(bge, pci, 1, 1, 1);
125MODULE_DEPEND(bge, ether, 1, 1, 1);
126MODULE_DEPEND(bge, miibus, 1, 1, 1);
127
128/* "device miibus" required. See GENERIC if you get errors here. */
129#include "miibus_if.h"
130
131/*
132 * Various supported device vendors/types and their names. Note: the
133 * spec seems to indicate that the hardware still has Alteon's vendor
134 * ID burned into it, though it will always be overriden by the vendor
135 * ID in the EEPROM. Just to be safe, we cover all possibilities.
136 */
137static const struct bge_type {
138 uint16_t bge_vid;
139 uint16_t bge_did;
140} bge_devs[] = {
141 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
142 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
143
144 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
145 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
147
148 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
149
150 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
201
202 { SK_VENDORID, SK_DEVICEID_ALTIMA },
203
204 { TC_VENDORID, TC_DEVICEID_3C996 },
205
206 { 0, 0 }
207};
208
209static const struct bge_vendor {
210 uint16_t v_id;
211 const char *v_name;
212} bge_vendors[] = {
213 { ALTEON_VENDORID, "Alteon" },
214 { ALTIMA_VENDORID, "Altima" },
215 { APPLE_VENDORID, "Apple" },
216 { BCOM_VENDORID, "Broadcom" },
217 { SK_VENDORID, "SysKonnect" },
218 { TC_VENDORID, "3Com" },
219
220 { 0, NULL }
221};
222
223static const struct bge_revision {
224 uint32_t br_chipid;
225 const char *br_name;
226} bge_revisions[] = {
227 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
228 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
229 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
230 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
231 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
232 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
233 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
234 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
235 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
236 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
237 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
238 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
239 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
240 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
241 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
242 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
243 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
244 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
245 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
246 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
247 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
248 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
249 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
250 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
251 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
252 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
253 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
254 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
255 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
256 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
257 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
258 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
259 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
260 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
261 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
262 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
263 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
264 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
265 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
266 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
267 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
268 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
269 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
270 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
271 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
272 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
273 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
274 /* 5754 and 5787 share the same ASIC ID */
275 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
276 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
277 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
278 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
279 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
280
281 { 0, NULL }
282};
283
284/*
285 * Some defaults for major revisions, so that newer steppings
286 * that we don't know about have a shot at working.
287 */
288static const struct bge_revision bge_majorrevs[] = {
289 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
290 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
291 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
292 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
293 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
294 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
295 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
296 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
297 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
298 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
299 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
300 /* 5754 and 5787 share the same ASIC ID */
301 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
302 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
303
304 { 0, NULL }
305};
306
307#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
308#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
309#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
310#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
311#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
312
313const struct bge_revision * bge_lookup_rev(uint32_t);
314const struct bge_vendor * bge_lookup_vendor(uint16_t);
315
316typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
317
318static int bge_probe(device_t);
319static int bge_attach(device_t);
320static int bge_detach(device_t);
321static int bge_suspend(device_t);
322static int bge_resume(device_t);
323static void bge_release_resources(struct bge_softc *);
324static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
325static int bge_dma_alloc(device_t);
326static void bge_dma_free(struct bge_softc *);
327
328static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
329static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
330static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
331static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
332static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
333
334static void bge_txeof(struct bge_softc *);
335static void bge_rxeof(struct bge_softc *);
336
337static void bge_asf_driver_up (struct bge_softc *);
338static void bge_tick(void *);
339static void bge_stats_update(struct bge_softc *);
340static void bge_stats_update_regs(struct bge_softc *);
341static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
342
343static void bge_intr(void *);
344static void bge_start_locked(struct ifnet *);
345static void bge_start(struct ifnet *);
346static int bge_ioctl(struct ifnet *, u_long, caddr_t);
347static void bge_init_locked(struct bge_softc *);
348static void bge_init(void *);
349static void bge_stop(struct bge_softc *);
350static void bge_watchdog(struct bge_softc *);
351static int bge_shutdown(device_t);
352static int bge_ifmedia_upd_locked(struct ifnet *);
353static int bge_ifmedia_upd(struct ifnet *);
354static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
355
356static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
357static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
358
359static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
360static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
361
362static void bge_setpromisc(struct bge_softc *);
363static void bge_setmulti(struct bge_softc *);
364static void bge_setvlan(struct bge_softc *);
365
366static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
367static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
368static int bge_init_rx_ring_std(struct bge_softc *);
369static void bge_free_rx_ring_std(struct bge_softc *);
370static int bge_init_rx_ring_jumbo(struct bge_softc *);
371static void bge_free_rx_ring_jumbo(struct bge_softc *);
372static void bge_free_tx_ring(struct bge_softc *);
373static int bge_init_tx_ring(struct bge_softc *);
374
375static int bge_chipinit(struct bge_softc *);
376static int bge_blockinit(struct bge_softc *);
377
378static int bge_has_eaddr(struct bge_softc *);
379static uint32_t bge_readmem_ind(struct bge_softc *, int);
380static void bge_writemem_ind(struct bge_softc *, int, int);
381static void bge_writembx(struct bge_softc *, int, int);
382#ifdef notdef
383static uint32_t bge_readreg_ind(struct bge_softc *, int);
384#endif
385static void bge_writemem_direct(struct bge_softc *, int, int);
386static void bge_writereg_ind(struct bge_softc *, int, int);
387static void bge_set_max_readrq(struct bge_softc *, int);
388
389static int bge_miibus_readreg(device_t, int, int);
390static int bge_miibus_writereg(device_t, int, int, int);
391static void bge_miibus_statchg(device_t);
392#ifdef DEVICE_POLLING
393static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
394#endif
395
396#define BGE_RESET_START 1
397#define BGE_RESET_STOP 2
398static void bge_sig_post_reset(struct bge_softc *, int);
399static void bge_sig_legacy(struct bge_softc *, int);
400static void bge_sig_pre_reset(struct bge_softc *, int);
401static int bge_reset(struct bge_softc *);
402static void bge_link_upd(struct bge_softc *);
403
404/*
405 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
406 * leak information to untrusted users. It is also known to cause alignment
407 * traps on certain architectures.
408 */
409#ifdef BGE_REGISTER_DEBUG
410static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
411static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
412static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
413#endif
414static void bge_add_sysctls(struct bge_softc *);
415static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
416
417static device_method_t bge_methods[] = {
418 /* Device interface */
419 DEVMETHOD(device_probe, bge_probe),
420 DEVMETHOD(device_attach, bge_attach),
421 DEVMETHOD(device_detach, bge_detach),
422 DEVMETHOD(device_shutdown, bge_shutdown),
423 DEVMETHOD(device_suspend, bge_suspend),
424 DEVMETHOD(device_resume, bge_resume),
425
426 /* bus interface */
427 DEVMETHOD(bus_print_child, bus_generic_print_child),
428 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
429
430 /* MII interface */
431 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
432 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
433 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
434
435 { 0, 0 }
436};
437
438static driver_t bge_driver = {
439 "bge",
440 bge_methods,
441 sizeof(struct bge_softc)
442};
443
444static devclass_t bge_devclass;
445
446DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
447DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
448
449static int bge_allow_asf = 1;
450
451TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
452
453SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
454SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
455 "Allow ASF mode if available");
456
457#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
458#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
459#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
460#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
461#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
462
463static int
464bge_has_eaddr(struct bge_softc *sc)
465{
466#ifdef __sparc64__
467 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
468 device_t dev;
469 uint32_t subvendor;
470
471 dev = sc->bge_dev;
472
473 /*
474 * The on-board BGEs found in sun4u machines aren't fitted with
475 * an EEPROM, which means that we have to obtain the MAC address
476 * via OFW and that some tests will always fail. We distinguish
477 * such BGEs by the subvendor ID, which also has to be obtained
478 * from OFW instead of the PCI configuration space as the latter
479 * indicates Broadcom as the subvendor of the netboot interface.
480 * For early Blade 1500 and 2500 we even have to check the OFW
481 * device path as the subvendor ID always defaults to Broadcom
482 * there.
483 */
484 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
485 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
486 subvendor == SUN_VENDORID)
487 return (0);
488 memset(buf, 0, sizeof(buf));
489 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
490 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
491 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
492 return (0);
493 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
494 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
495 return (0);
496 }
497#endif
498 return (1);
499}
500
501static uint32_t
502bge_readmem_ind(struct bge_softc *sc, int off)
503{
504 device_t dev;
505 uint32_t val;
506
507 dev = sc->bge_dev;
508
509 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
510 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
511 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
512 return (val);
513}
514
515static void
516bge_writemem_ind(struct bge_softc *sc, int off, int val)
517{
518 device_t dev;
519
520 dev = sc->bge_dev;
521
522 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
523 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
524 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
525}
526
527/*
528 * PCI Express only: set the maximum read request size to 4096 bytes.
529 */
530static void
531bge_set_max_readrq(struct bge_softc *sc, int expr_ptr)
532{
533 device_t dev;
534 uint16_t val;
535
536 KASSERT((sc->bge_flags & BGE_FLAG_PCIE) && expr_ptr != 0,
537 ("%s: not applicable", __func__));
538
539 dev = sc->bge_dev;
540
541 val = pci_read_config(dev, expr_ptr + BGE_PCIE_DEVCTL, 2);
542 if ((val & BGE_PCIE_DEVCTL_MAX_READRQ_MASK) !=
543 BGE_PCIE_DEVCTL_MAX_READRQ_4096) {
544 if (bootverbose)
545 device_printf(dev, "adjust device control 0x%04x ",
546 val);
547 val &= ~BGE_PCIE_DEVCTL_MAX_READRQ_MASK;
548 val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
549 pci_write_config(dev, expr_ptr + BGE_PCIE_DEVCTL, val, 2);
550 if (bootverbose)
551 printf("-> 0x%04x\n", val);
552 }
553}
554
555#ifdef notdef
556static uint32_t
557bge_readreg_ind(struct bge_softc *sc, int off)
558{
559 device_t dev;
560
561 dev = sc->bge_dev;
562
563 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
564 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
565}
566#endif
567
568static void
569bge_writereg_ind(struct bge_softc *sc, int off, int val)
570{
571 device_t dev;
572
573 dev = sc->bge_dev;
574
575 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
576 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
577}
578
579static void
580bge_writemem_direct(struct bge_softc *sc, int off, int val)
581{
582 CSR_WRITE_4(sc, off, val);
583}
584
585static void
586bge_writembx(struct bge_softc *sc, int off, int val)
587{
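588 /* The BCM5906 keeps its mailbox registers in the low-priority mailbox area. */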
588 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
589 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
590
591 CSR_WRITE_4(sc, off, val);
592}
593
594/*
595 * Map a single buffer address.
596 */
598static void
599bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
600{
601 struct bge_dmamap_arg *ctx;
602
603 if (error)
604 return;
605
606 ctx = arg;
607
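608 /* Signal failure to the caller if the mapping needs too many segments. */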
608 if (nseg > ctx->bge_maxsegs) {
609 ctx->bge_maxsegs = 0;
610 return;
611 }
612
613 ctx->bge_busaddr = segs->ds_addr;
614}
615
616static uint8_t
617bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
618{
619 uint32_t access, byte = 0;
620 int i;
621
622 /* Lock. */
623 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
624 for (i = 0; i < 8000; i++) {
625 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
626 break;
627 DELAY(20);
628 }
629 if (i == 8000)
630 return (1);
631
632 /* Enable access. */
633 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
634 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
635
636 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
637 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
638 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
639 DELAY(10);
640 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
641 DELAY(10);
642 break;
643 }
644 }
645
646 if (i == BGE_TIMEOUT * 10) {
647 if_printf(sc->bge_ifp, "nvram read timed out\n");
648 return (1);
649 }
650
651 /* Get result. */
652 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
653
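654 /* The NVRAM data is a big-endian 32-bit word; extract the addressed byte. */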
654 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
655
656 /* Disable access. */
657 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
658
659 /* Unlock. */
660 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
661 CSR_READ_4(sc, BGE_NVRAM_SWARB);
662
663 return (0);
664}
665
666/*
667 * Read a sequence of bytes from NVRAM.
668 */
669static int
670bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
671{
672 int err = 0, i;
673 uint8_t byte = 0;
674
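675 /* The driver only reads NVRAM this way on the BCM5906. */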
675 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
676 return (1);
677
678 for (i = 0; i < cnt; i++) {
679 err = bge_nvram_getbyte(sc, off + i, &byte);
680 if (err)
681 break;
682 *(dest + i) = byte;
683 }
684
685 return (err ? 1 : 0);
686}
687
688/*
689 * Read a byte of data stored in the EEPROM at address 'addr.' The
690 * BCM570x supports both the traditional bitbang interface and an
691 * auto access interface for reading the EEPROM. We use the auto
692 * access method.
693 */
694static uint8_t
695bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
696{
697 int i;
698 uint32_t byte = 0;
699
700 /*
701 * Enable use of auto EEPROM access so we can avoid
702 * having to use the bitbang method.
703 */
704 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
705
706 /* Reset the EEPROM, load the clock period. */
707 CSR_WRITE_4(sc, BGE_EE_ADDR,
708 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
709 DELAY(20);
710
711 /* Issue the read EEPROM command. */
712 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
713
714 /* Wait for completion */
715 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
716 DELAY(10);
717 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
718 break;
719 }
720
721 if (i == BGE_TIMEOUT * 10) {
722 device_printf(sc->bge_dev, "EEPROM read timed out\n");
723 return (1);
724 }
725
726 /* Get result. */
727 byte = CSR_READ_4(sc, BGE_EE_DATA);
728
729 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
730
731 return (0);
732}
733
734/*
735 * Read a sequence of bytes from the EEPROM.
736 */
737static int
738bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
739{
740 int i, error = 0;
741 uint8_t byte = 0;
742
743 for (i = 0; i < cnt; i++) {
744 error = bge_eeprom_getbyte(sc, off + i, &byte);
745 if (error)
746 break;
747 *(dest + i) = byte;
748 }
749
750 return (error ? 1 : 0);
751}
752
753static int
754bge_miibus_readreg(device_t dev, int phy, int reg)
755{
756 struct bge_softc *sc;
757 uint32_t val, autopoll;
758 int i;
759
760 sc = device_get_softc(dev);
761
762 /*
763 * Broadcom's own driver always assumes the internal
764 * PHY is at GMII address 1. On some chips, the PHY responds
765 * to accesses at all addresses, which could cause us to
766 * bogusly attach the PHY 32 times at probe time. Always
767 * restricting the lookup to address 1 is simpler than
768 * trying to figure out which chip revisions should be
769 * special-cased.
770 */
771 if (phy != 1)
772 return (0);
773
774 /* Reading with autopolling on may trigger PCI errors */
775 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
776 if (autopoll & BGE_MIMODE_AUTOPOLL) {
777 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
778 DELAY(40);
779 }
780
781 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
782 BGE_MIPHY(phy) | BGE_MIREG(reg));
783
784 for (i = 0; i < BGE_TIMEOUT; i++) {
785 DELAY(10);
786 val = CSR_READ_4(sc, BGE_MI_COMM);
787 if (!(val & BGE_MICOMM_BUSY))
788 break;
789 }
790
791 if (i == BGE_TIMEOUT) {
792 device_printf(sc->bge_dev,
793 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
794 phy, reg, val);
795 val = 0;
796 goto done;
797 }
798
799 DELAY(5);
800 val = CSR_READ_4(sc, BGE_MI_COMM);
801
802done:
803 if (autopoll & BGE_MIMODE_AUTOPOLL) {
804 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
805 DELAY(40);
806 }
807
808 if (val & BGE_MICOMM_READFAIL)
809 return (0);
810
811 return (val & 0xFFFF);
812}
813
814static int
815bge_miibus_writereg(device_t dev, int phy, int reg, int val)
816{
817 struct bge_softc *sc;
818 uint32_t autopoll;
819 int i;
820
821 sc = device_get_softc(dev);
822
823 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
824 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
825 return (0);
826
827 /* Writing with autopolling on may trigger PCI errors */
828 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
829 if (autopoll & BGE_MIMODE_AUTOPOLL) {
830 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
831 DELAY(40);
832 }
833
834 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
835 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
836
837 for (i = 0; i < BGE_TIMEOUT; i++) {
838 DELAY(10);
839 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
840 DELAY(5);
841 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
842 break;
843 }
844 }
845
846 if (i == BGE_TIMEOUT) {
847 device_printf(sc->bge_dev,
848 "PHY write timed out (phy %d, reg %d, val %d)\n",
849 phy, reg, val);
850 return (0);
851 }
852
853 if (autopoll & BGE_MIMODE_AUTOPOLL) {
854 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
855 DELAY(40);
856 }
857
858 return (0);
859}
860
861static void
862bge_miibus_statchg(device_t dev)
863{
864 struct bge_softc *sc;
865 struct mii_data *mii;
866 sc = device_get_softc(dev);
867 mii = device_get_softc(sc->bge_miibus);
868
869 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
870 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
871 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
872 else
873 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
874
875 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
876 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
877 else
878 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
879}
880
881/*
882 * Initialize a standard receive ring descriptor.
883 */
884static int
885bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
886{
887 struct mbuf *m_new = NULL;
888 struct bge_rx_bd *r;
889 struct bge_dmamap_arg ctx;
890 int error;
891
892 if (m == NULL) {
893 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
894 if (m_new == NULL)
895 return (ENOBUFS);
896 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
897 } else {
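898 /* Reuse the caller-supplied mbuf and reset it to a full-size cluster. */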
898 m_new = m;
899 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
900 m_new->m_data = m_new->m_ext.ext_buf;
901 }
902
903 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
904 m_adj(m_new, ETHER_ALIGN);
905 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
906 r = &sc->bge_ldata.bge_rx_std_ring[i];
907 ctx.bge_maxsegs = 1;
908 ctx.sc = sc;
909 error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
910 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
911 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
912 if (error || ctx.bge_maxsegs == 0) {
913 if (m == NULL) {
914 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
915 m_freem(m_new);
916 }
917 return (ENOMEM);
918 }
919 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
920 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
921 r->bge_flags = BGE_RXBDFLAG_END;
922 r->bge_len = m_new->m_len;
923 r->bge_idx = i;
924
925 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
926 sc->bge_cdata.bge_rx_std_dmamap[i],
927 BUS_DMASYNC_PREREAD);
928
929 return (0);
930}
931
932/*
933 * Initialize a jumbo receive ring descriptor. This allocates
934 * a 9K mbuf cluster to back the buffer.
935 */
936static int
937bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
938{
939 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
940 struct bge_extrx_bd *r;
941 struct mbuf *m_new = NULL;
942 int nsegs;
943 int error;
944
945 if (m == NULL) {
946 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
947 if (m_new == NULL)
948 return (ENOBUFS);
949
950 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
951 if (!(m_new->m_flags & M_EXT)) {
952 m_freem(m_new);
953 return (ENOBUFS);
954 }
955 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
956 } else {
957 m_new = m;
958 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
959 m_new->m_data = m_new->m_ext.ext_buf;
960 }
961
962 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
963 m_adj(m_new, ETHER_ALIGN);
964
965 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
966 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
967 m_new, segs, &nsegs, BUS_DMA_NOWAIT);
968 if (error) {
969 if (m == NULL)
970 m_freem(m_new);
971 return (error);
972 }
973 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
974
975 /*
976 * Fill in the extended RX buffer descriptor.
977 */
978 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
979 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
980 r->bge_idx = i;
981 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
982 switch (nsegs) {
983 case 4:
984 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
985 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
986 r->bge_len3 = segs[3].ds_len;
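986 /* FALLTHROUGH */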
987 case 3:
988 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
989 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
990 r->bge_len2 = segs[2].ds_len;
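990 /* FALLTHROUGH */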
991 case 2:
992 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
993 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
994 r->bge_len1 = segs[1].ds_len;
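994 /* FALLTHROUGH */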
995 case 1:
996 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
997 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
998 r->bge_len0 = segs[0].ds_len;
999 break;
1000 default:
1001 panic("%s: %d segments\n", __func__, nsegs);
1002 }
1003
1004 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1005 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1006 BUS_DMASYNC_PREREAD);
1007
1008 return (0);
1009}
1010
1011/*
1012 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
1013 * that's 1MB of memory, which is a lot. For now, we fill only the first
1014 * 256 ring entries and hope that our CPU is fast enough to keep up with
1015 * the NIC.
1016 */
1017static int
1018bge_init_rx_ring_std(struct bge_softc *sc)
1019{
1020 int i;
1021
1022 for (i = 0; i < BGE_SSLOTS; i++) {
1023 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
1024 return (ENOBUFS);
1025 }
1026
1027 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1028 sc->bge_cdata.bge_rx_std_ring_map,
1029 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1030
1031 sc->bge_std = i - 1;
1032 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1033
1034 return (0);
1035}
1036
1037static void
1038bge_free_rx_ring_std(struct bge_softc *sc)
1039{
1040 int i;
1041
1042 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1043 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1044 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
1045 sc->bge_cdata.bge_rx_std_dmamap[i],
1046 BUS_DMASYNC_POSTREAD);
1047 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1048 sc->bge_cdata.bge_rx_std_dmamap[i]);
1049 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1050 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1051 }
1052 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1053 sizeof(struct bge_rx_bd));
1054 }
1055}
1056
1057static int
1058bge_init_rx_ring_jumbo(struct bge_softc *sc)
1059{
1060 struct bge_rcb *rcb;
1061 int i;
1062
1063 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1064 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1065 return (ENOBUFS);
1066 }
1067
1068 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1069 sc->bge_cdata.bge_rx_jumbo_ring_map,
1070 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1071
1072 sc->bge_jumbo = i - 1;
1073
1074 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1075 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1076 BGE_RCB_FLAG_USE_EXT_RX_BD);
1077 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1078
1079 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1080
1081 return (0);
1082}
1083
1084static void
1085bge_free_rx_ring_jumbo(struct bge_softc *sc)
1086{
1087 int i;
1088
1089 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1090 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1091 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1092 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1093 BUS_DMASYNC_POSTREAD);
1094 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1095 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1096 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1097 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1098 }
1099 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1100 sizeof(struct bge_extrx_bd));
1101 }
1102}
1103
1104static void
1105bge_free_tx_ring(struct bge_softc *sc)
1106{
1107 int i;
1108
1109 if (sc->bge_ldata.bge_tx_ring == NULL)
1110 return;
1111
1112 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1113 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1114 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
1115 sc->bge_cdata.bge_tx_dmamap[i],
1116 BUS_DMASYNC_POSTWRITE);
1117 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1118 sc->bge_cdata.bge_tx_dmamap[i]);
1119 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1120 sc->bge_cdata.bge_tx_chain[i] = NULL;
1121 }
1122 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1123 sizeof(struct bge_tx_bd));
1124 }
1125}
1126
1127static int
1128bge_init_tx_ring(struct bge_softc *sc)
1129{
1130 sc->bge_txcnt = 0;
1131 sc->bge_tx_saved_considx = 0;
1132
1133 /* Initialize transmit producer index for host-memory send ring. */
1134 sc->bge_tx_prodidx = 0;
1135 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1136
1137 /* 5700 b2 errata */
1138 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1139 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1140
1141 /* NIC-memory send ring not used; initialize to zero. */
1142 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1143 /* 5700 b2 errata */
1144 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1145 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1146
1147 return (0);
1148}
1149
1150static void
1151bge_setpromisc(struct bge_softc *sc)
1152{
1153 struct ifnet *ifp;
1154
1155 BGE_LOCK_ASSERT(sc);
1156
1157 ifp = sc->bge_ifp;
1158
1159 /* Enable or disable promiscuous mode as needed. */
1160 if (ifp->if_flags & IFF_PROMISC)
1161 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1162 else
1163 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1164}
1165
1166static void
1167bge_setmulti(struct bge_softc *sc)
1168{
1169 struct ifnet *ifp;
1170 struct ifmultiaddr *ifma;
1171 uint32_t hashes[4] = { 0, 0, 0, 0 };
1172 int h, i;
1173
1174 BGE_LOCK_ASSERT(sc);
1175
1176 ifp = sc->bge_ifp;
1177
1178 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1179 for (i = 0; i < 4; i++)
1180 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1181 return;
1182 }
1183
1184 /* First, zot all the existing filters. */
1185 for (i = 0; i < 4; i++)
1186 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1187
1188 /* Now program new ones. */
1189 IF_ADDR_LOCK(ifp);
1190 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1191 if (ifma->ifma_addr->sa_family != AF_LINK)
1192 continue;
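1193 /* Use the low 7 CRC bits: bits 5-6 select the MAR register, bits 0-4 the bit. */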
1193 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1194 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1195 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1196 }
1197 IF_ADDR_UNLOCK(ifp);
1198
1199 for (i = 0; i < 4; i++)
1200 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1201}
1202
1203static void
1204bge_setvlan(struct bge_softc *sc)
1205{
1206 struct ifnet *ifp;
1207
1208 BGE_LOCK_ASSERT(sc);
1209
1210 ifp = sc->bge_ifp;
1211
1212 /* Enable or disable VLAN tag stripping as needed. */
1213 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1214 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1215 else
1216 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1217}
1218
1219static void
1220bge_sig_pre_reset(struct bge_softc *sc, int type)
1221{
1224 /*
1225 * Some chips don't like this, so only do it if ASF is enabled.
1226 */
1227 if (sc->bge_asf_mode)
1228 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1229
1230 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1231 switch (type) {
1232 case BGE_RESET_START:
1233 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1234 break;
1235 case BGE_RESET_STOP:
1236 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1237 break;
1238 }
1239 }
1240}
1241
1242static void
1243bge_sig_post_reset(struct bge_softc *sc, int type)
1244{
1247 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1248 switch (type) {
1249 case BGE_RESET_START:
1250 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1251 /* START DONE */
1252 break;
1253 case BGE_RESET_STOP:
1254 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
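1255 /* UNLOAD DONE */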
1255 break;
1256 }
1257 }
1258}
1259
1260static void
1261bge_sig_legacy(struct bge_softc *sc, int type)
1262{
1265 if (sc->bge_asf_mode) {
1266 switch (type) {
1267 case BGE_RESET_START:
1268 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1269 break;
1270 case BGE_RESET_STOP:
1271 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1272 break;
1273 }
1274 }
1275}
1276
1277void bge_stop_fw(struct bge_softc *);
1278void
1279bge_stop_fw(struct bge_softc *sc)
1280{
1282 int i;
1283
1284 if (sc->bge_asf_mode) {
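1285 /* Ask the firmware to pause, then wait for the CPU event to be acknowledged. */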
1285 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1286 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1287 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1288
1289 for (i = 0; i < 100; i++ ) {
1290 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1291 break;
1292 DELAY(10);
1293 }
1294 }
1295}
1296
1297/*
1298 * Do endian, PCI and DMA initialization.
1299 */
1300static int
1301bge_chipinit(struct bge_softc *sc)
1302{
1303 uint32_t dma_rw_ctl;
1304 int i;
1305
1306 /* Set endianness before we access any non-PCI registers. */
1307 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1308
1309 /* Clear the MAC control register */
1310 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1311
1312 /*
1313 * Clear the MAC statistics block in the NIC's
1314 * internal memory.
1315 */
1316 for (i = BGE_STATS_BLOCK;
1317 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1318 BGE_MEMWIN_WRITE(sc, i, 0);
1319
1320 for (i = BGE_STATUS_BLOCK;
1321 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1322 BGE_MEMWIN_WRITE(sc, i, 0);
1323
1324 /*
1325 * Set up the PCI DMA control register.
1326 */
1327 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1328 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1329 if (sc->bge_flags & BGE_FLAG_PCIE) {
1330 /* Read watermark not used, 128 bytes for write. */
1331 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1332 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1333 if (BGE_IS_5714_FAMILY(sc)) {
1334 /* 256 bytes for read and write. */
1335 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1336 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1337 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1338 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1339 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1340 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1341 /* 1536 bytes for read, 384 bytes for write. */
1342 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1343 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1344 } else {
1345 /* 384 bytes for read and write. */
1346 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1347 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1348 0x0F;
1349 }
1350 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1351 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1352 uint32_t tmp;
1353
1354 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1355 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1356 if (tmp == 6 || tmp == 7)
1357 dma_rw_ctl |=
1358 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1359
1360 /* Set PCI-X DMA write workaround. */
1361 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1362 }
1363 } else {
1364 /* Conventional PCI bus: 256 bytes for read and write. */
1365 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1366 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1367
1368 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1369 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1370 dma_rw_ctl |= 0x0F;
1371 }
1372 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1373 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1374 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1375 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1376 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1377 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1378 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1379 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1380
1381 /*
1382 * Set up general mode register.
1383 */
1384 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1385 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1386 BGE_MODECTL_TX_NO_PHDR_CSUM);
1387
1388 /*
1389 * BCM5701 B5 has a bug causing data corruption when using
1390 * 64-bit DMA reads, which can be terminated early and then
1391 * completed later as 32-bit accesses, in combination with
1392 * certain bridges.
1393 */
1394 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1395 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1396 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1397
1398 /*
1399 * Tell the firmware the driver is running
1400 */
1401 if (sc->bge_asf_mode & ASF_STACKUP)
1402 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1403
1404 /*
1405 * Disable memory write invalidate. Apparently it is not supported
1406 * properly by these devices. Also ensure that INTx isn't disabled,
1407 * as these chips need it even when using MSI.
1408 */
1409 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1410 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1410
1411 /* Set the timer prescaler (always 66MHz) */
1412 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1413
1414 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1415 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1416 DELAY(40); /* XXX */
1417
1418 /* Put PHY into ready state */
1419 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1420 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1421 DELAY(40);
1422 }
1423
1424 return (0);
1425}
1426
1427static int
1428bge_blockinit(struct bge_softc *sc)
1429{
1430 struct bge_rcb *rcb;
1431 bus_size_t vrcb;
1432 bge_hostaddr taddr;
1433 uint32_t val;
1434 int i;
1435
1436 /*
1437 * Initialize the memory window pointer register so that
1438 * we can access the first 32K of internal NIC RAM. This will
1439 * allow us to set up the TX send ring RCBs and the RX return
1440 * ring RCBs, plus other things which live in NIC memory.
1441 */
1442 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1443
1444 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1445
1446 if (!(BGE_IS_5705_PLUS(sc))) {
1447 /* Configure mbuf memory pool */
1448 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1449 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1450 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1451 else
1452 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1453
1454 /* Configure DMA resource pool */
1455 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1456 BGE_DMA_DESCRIPTORS);
1457 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1458 }
1459
1460 /* Configure mbuf pool watermarks */
1461 if (!BGE_IS_5705_PLUS(sc)) {
1462 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1463 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1464 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1465 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1466 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1467 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1468 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1469 } else {
1470 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1471 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1472 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1473 }
1474
1475 /* Configure DMA resource watermarks */
1476 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1477 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1478
1479 /* Enable buffer manager */
1480 if (!(BGE_IS_5705_PLUS(sc))) {
1481 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1482 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1483
1484 /* Poll for buffer manager start indication */
1485 for (i = 0; i < BGE_TIMEOUT; i++) {
1486 DELAY(10);
1487 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1488 break;
1489 }
1490
1491 if (i == BGE_TIMEOUT) {
1492 device_printf(sc->bge_dev,
1493 "buffer manager failed to start\n");
1494 return (ENXIO);
1495 }
1496 }
1497
1498 /* Enable flow-through queues */
1499 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1500 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1501
1502 /* Wait until queue initialization is complete */
1503 for (i = 0; i < BGE_TIMEOUT; i++) {
1504 DELAY(10);
1505 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1506 break;
1507 }
1508
1509 if (i == BGE_TIMEOUT) {
1510 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1511 return (ENXIO);
1512 }
1513
1514 /* Initialize the standard RX ring control block */
1515 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1516 rcb->bge_hostaddr.bge_addr_lo =
1517 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1518 rcb->bge_hostaddr.bge_addr_hi =
1519 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1520 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1521 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1522 if (BGE_IS_5705_PLUS(sc))
1523 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1524 else
1525 rcb->bge_maxlen_flags =
1526 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1527 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1528 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1529 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1530
1531 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1532 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1533
1534 /*
1535 * Initialize the jumbo RX ring control block
1536 * We set the 'ring disabled' bit in the flags
1537 * field until we're actually ready to start
1538 * using this ring (i.e. once we set the MTU
1539 * high enough to require it).
1540 */
1541 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1542 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1543
1544 rcb->bge_hostaddr.bge_addr_lo =
1545 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1546 rcb->bge_hostaddr.bge_addr_hi =
1547 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1548 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1549 sc->bge_cdata.bge_rx_jumbo_ring_map,
1550 BUS_DMASYNC_PREREAD);
1551 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1552 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1553 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1554 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1555 rcb->bge_hostaddr.bge_addr_hi);
1556 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1557 rcb->bge_hostaddr.bge_addr_lo);
1558
1559 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1560 rcb->bge_maxlen_flags);
1561 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1562
1563 /* Set up dummy disabled mini ring RCB */
1564 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1565 rcb->bge_maxlen_flags =
1566 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1567 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1568 rcb->bge_maxlen_flags);
1569 }
1570
1571 /*
1572 * Set the BD ring replenish thresholds. The recommended
1573 * values are 1/8th the number of descriptors allocated to
1574 * each ring.
1575 * XXX The 5754 requires a lower threshold, so it might be a
1576 * requirement of all 575x family chips. The Linux driver sets
1577 * the lower threshold for all 5705 family chips as well, but there
1578 * are reports that it might not need to be so strict.
1579 *
1580 * XXX Linux does some extra fiddling here for the 5906 parts as
1581 * well.
1582 */
1583 if (BGE_IS_5705_PLUS(sc))
1584 val = 8;
1585 else
1586 val = BGE_STD_RX_RING_CNT / 8;
1587 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1588 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8);
1589
1590 /*
1591 * Disable all unused send rings by setting the 'ring disabled'
1592 * bit in the flags field of all the TX send ring control blocks.
1593 * These are located in NIC memory.
1594 */
1595 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1596 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1597 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1598 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1599 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1600 vrcb += sizeof(struct bge_rcb);
1601 }
1602
1603 /* Configure TX RCB 0 (we use only the first ring) */
1604 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1605 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1606 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1607 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1608 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1609 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1610 if (!(BGE_IS_5705_PLUS(sc)))
1611 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1612 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1613
1614 /* Disable all unused RX return rings */
1615 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1616 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1617 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1618 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1619 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1620 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1621 BGE_RCB_FLAG_RING_DISABLED));
1622 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1623 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1624 (i * (sizeof(uint64_t))), 0);
1625 vrcb += sizeof(struct bge_rcb);
1626 }
1627
1628 /* Initialize RX ring indexes */
1629 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1630 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1631 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1632
1633 /*
1634 * Set up RX return ring 0
1635 * Note that the NIC address for RX return rings is 0x00000000.
1636 * The return rings live entirely within the host, so the
1637 * nicaddr field in the RCB isn't used.
1638 */
1639 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1640 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1641 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1642 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1643 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1644 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1645 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1646
1647 /* Set random backoff seed for TX */
1648 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1649 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1650 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1651 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1652 BGE_TX_BACKOFF_SEED_MASK);
1653
1654 /* Set inter-packet gap */
1655 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1656
1657 /*
1658 * Specify which ring to use for packets that don't match
1659 * any RX rules.
1660 */
1661 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1662
1663 /*
1664 * Configure number of RX lists. One interrupt distribution
1665 * list, sixteen active lists, one bad frames class.
1666 */
1667 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1668
1669 /* Initialize RX list placement stats mask. */
1670 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1671 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1672
1673 /* Disable host coalescing until we get it set up */
1674 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1675
1676 /* Poll to make sure it's shut down. */
1677 for (i = 0; i < BGE_TIMEOUT; i++) {
1678 DELAY(10);
1679 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1680 break;
1681 }
1682
1683 if (i == BGE_TIMEOUT) {
1684 device_printf(sc->bge_dev,
1685 "host coalescing engine failed to idle\n");
1686 return (ENXIO);
1687 }
1688
1689 /* Set up host coalescing defaults */
1690 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1691 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1692 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1693 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1694 if (!(BGE_IS_5705_PLUS(sc))) {
1695 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1696 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1697 }
1698 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1699 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1700
1701 /* Set up address of statistics block */
1702 if (!(BGE_IS_5705_PLUS(sc))) {
1703 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1704 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1705 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1706 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1707 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1708 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1709 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1710 }
1711
1712 /* Set up address of status block */
1713 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1714 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1715 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1716 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1717 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1718 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1719
1720 /* Turn on host coalescing state machine */
1721 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1722
1723 /* Turn on RX BD completion state machine and enable attentions */
1724 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1725 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1726
1727 /* Turn on RX list placement state machine */
1728 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1729
1730 /* Turn on RX list selector state machine. */
1731 if (!(BGE_IS_5705_PLUS(sc)))
1732 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1733
1734 /* Turn on DMA, clear stats */
1735 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB |
1736 BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR |
1737 BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB |
1738 BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB |
1739 ((sc->bge_flags & BGE_FLAG_TBI) ?
1740 BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1741
1742 /* Set misc. local control, enable interrupts on attentions */
1743 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1744
1745#ifdef notdef
1746 /* Assert GPIO pins for PHY reset */
1747 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1748 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1749 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1750 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1751#endif
1752
1753 /* Turn on DMA completion state machine */
1754 if (!(BGE_IS_5705_PLUS(sc)))
1755 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1756
1757 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1758
1759 /* Enable host coalescing bug fix. */
1760 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1761 sc->bge_asicrev == BGE_ASICREV_BCM5787)
1762 val |= 1 << 29;
1763
1764 /* Turn on write DMA state machine */
1765 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1766 DELAY(40);
1767
1768 /* Turn on read DMA state machine */
1769 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1770 if (sc->bge_flags & BGE_FLAG_PCIE)
1771 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1772 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1773 DELAY(40);
1774
1775 /* Turn on RX data completion state machine */
1776 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1777
1778 /* Turn on RX BD initiator state machine */
1779 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1780
1781 /* Turn on RX data and RX BD initiator state machine */
1782 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1783
1784 /* Turn on Mbuf cluster free state machine */
1785 if (!(BGE_IS_5705_PLUS(sc)))
1786 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1787
1788 /* Turn on send BD completion state machine */
1789 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1790
1791 /* Turn on send data completion state machine */
1792 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1793
1794 /* Turn on send data initiator state machine */
1795 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1796
1797 /* Turn on send BD initiator state machine */
1798 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1799
1800 /* Turn on send BD selector state machine */
1801 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1802
1803 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1804 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1805 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
1806
1807 /* ack/clear link change events */
1808 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1809 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1810 BGE_MACSTAT_LINK_CHANGED);
1811 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1812
1813 /* Enable PHY auto polling (for MII/GMII only) */
1814 if (sc->bge_flags & BGE_FLAG_TBI) {
1815 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1816 } else {
1817 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
1818 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1819 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1820 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1821 BGE_EVTENB_MI_INTERRUPT);
1822 }
1823
1824 /*
1825 * Clear any pending link state attention.
1826 * Otherwise some link state change events may be lost until attention
1827 * is cleared by bge_intr() -> bge_link_upd() sequence.
1828 * It's not necessary on newer BCM chips - perhaps enabling link
1829 * state change attentions implies clearing pending attention.
1830 */
1831 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1832 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1833 BGE_MACSTAT_LINK_CHANGED);
1834
1835 /* Enable link state change attentions. */
1836 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1837
1838 return (0);
1839}
1840
1841const struct bge_revision *
1842bge_lookup_rev(uint32_t chipid)
1843{
1844 const struct bge_revision *br;
1845
1846 for (br = bge_revisions; br->br_name != NULL; br++) {
1847 if (br->br_chipid == chipid)
1848 return (br);
1849 }
1850
1851 for (br = bge_majorrevs; br->br_name != NULL; br++) {
1852 if (br->br_chipid == BGE_ASICREV(chipid))
1853 return (br);
1854 }
1855
1856 return (NULL);
1857}
1858
1859const struct bge_vendor *
1860bge_lookup_vendor(uint16_t vid)
1861{
1862 const struct bge_vendor *v;
1863
1864 for (v = bge_vendors; v->v_name != NULL; v++)
1865 if (v->v_id == vid)
1866 return (v);
1867
1868 panic("%s: unknown vendor %d", __func__, vid);
1869 return (NULL);
1870}
1871
1872/*
1873 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1874 * against our list and return its name if we find a match.
1875 *
1876 * Note that since the Broadcom controller contains VPD support, we
1877 * try to get the device name string from the controller itself instead
1878 * of the compiled-in string. It guarantees we'll always announce the
1879 * right product name. We fall back to the compiled-in string when
1880 * VPD is unavailable or corrupt.
1881 */
1882static int
1883bge_probe(device_t dev)
1884{
1885 const struct bge_type *t = bge_devs;
1886 struct bge_softc *sc = device_get_softc(dev);
1887 uint16_t vid, did;
1888
1889 sc->bge_dev = dev;
1890 vid = pci_get_vendor(dev);
1891 did = pci_get_device(dev);
1892 while (t->bge_vid != 0) {
1893 if ((vid == t->bge_vid) && (did == t->bge_did)) {
1894 char model[64], buf[96];
1895 const struct bge_revision *br;
1896 const struct bge_vendor *v;
1897 uint32_t id;
1898
1899 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1900 BGE_PCIMISCCTL_ASICREV;
1901 br = bge_lookup_rev(id);
1902 v = bge_lookup_vendor(vid);
1903 {
1904#if __FreeBSD_version > 700024
1905 const char *pname;
1906
1907 if (bge_has_eaddr(sc) &&
1908 pci_get_vpd_ident(dev, &pname) == 0)
1909 snprintf(model, 64, "%s", pname);
1910 else
1911#endif
1912 snprintf(model, 64, "%s %s",
1913 v->v_name,
1914 br != NULL ? br->br_name :
1915 "NetXtreme Ethernet Controller");
1916 }
1917 snprintf(buf, 96, "%s, %sASIC rev. %#04x", model,
1918 br != NULL ? "" : "unknown ", id >> 16);
1919 device_set_desc_copy(dev, buf);
1920 if (pci_get_subvendor(dev) == DELL_VENDORID)
1921 sc->bge_flags |= BGE_FLAG_NO_3LED;
1922 if (did == BCOM_DEVICEID_BCM5755M)
1923 sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
1924 return (0);
1925 }
1926 t++;
1927 }
1928
1929 return (ENXIO);
1930}
1931
1932static void
1933bge_dma_free(struct bge_softc *sc)
1934{
1935 int i;
1936
1937 /* Destroy DMA maps for RX buffers. */
1938 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1939 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1940 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1941 sc->bge_cdata.bge_rx_std_dmamap[i]);
1942 }
1943
1944 /* Destroy DMA maps for jumbo RX buffers. */
1945 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1946 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1947 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1948 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1949 }
1950
1951 /* Destroy DMA maps for TX buffers. */
1952 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1953 if (sc->bge_cdata.bge_tx_dmamap[i])
1954 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1955 sc->bge_cdata.bge_tx_dmamap[i]);
1956 }
1957
1958 if (sc->bge_cdata.bge_mtag)
1959 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1960
1962 /* Destroy standard RX ring. */
1963 if (sc->bge_cdata.bge_rx_std_ring_map)
1964 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1965 sc->bge_cdata.bge_rx_std_ring_map);
1966 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1967 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1968 sc->bge_ldata.bge_rx_std_ring,
1969 sc->bge_cdata.bge_rx_std_ring_map);
1970
1971 if (sc->bge_cdata.bge_rx_std_ring_tag)
1972 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1973
1974 /* Destroy jumbo RX ring. */
1975 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1976 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1977 sc->bge_cdata.bge_rx_jumbo_ring_map);
1978
1979 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1980 sc->bge_ldata.bge_rx_jumbo_ring)
1981 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1982 sc->bge_ldata.bge_rx_jumbo_ring,
1983 sc->bge_cdata.bge_rx_jumbo_ring_map);
1984
1985 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1986 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1987
1988 /* Destroy RX return ring. */
1989 if (sc->bge_cdata.bge_rx_return_ring_map)
1990 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1991 sc->bge_cdata.bge_rx_return_ring_map);
1992
1993 if (sc->bge_cdata.bge_rx_return_ring_map &&
1994 sc->bge_ldata.bge_rx_return_ring)
1995 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1996 sc->bge_ldata.bge_rx_return_ring,
1997 sc->bge_cdata.bge_rx_return_ring_map);
1998
1999 if (sc->bge_cdata.bge_rx_return_ring_tag)
2000 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2001
2002 /* Destroy TX ring. */
2003 if (sc->bge_cdata.bge_tx_ring_map)
2004 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2005 sc->bge_cdata.bge_tx_ring_map);
2006
2007 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2008 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2009 sc->bge_ldata.bge_tx_ring,
2010 sc->bge_cdata.bge_tx_ring_map);
2011
2012 if (sc->bge_cdata.bge_tx_ring_tag)
2013 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2014
2015 /* Destroy status block. */
2016 if (sc->bge_cdata.bge_status_map)
2017 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2018 sc->bge_cdata.bge_status_map);
2019
2020 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2021 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2022 sc->bge_ldata.bge_status_block,
2023 sc->bge_cdata.bge_status_map);
2024
2025 if (sc->bge_cdata.bge_status_tag)
2026 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2027
2028 /* Destroy statistics block. */
2029 if (sc->bge_cdata.bge_stats_map)
2030 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2031 sc->bge_cdata.bge_stats_map);
2032
2033 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2034 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2035 sc->bge_ldata.bge_stats,
2036 sc->bge_cdata.bge_stats_map);
2037
2038 if (sc->bge_cdata.bge_stats_tag)
2039 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2040
2041 /* Destroy the parent tag. */
2042 if (sc->bge_cdata.bge_parent_tag)
2043 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2044}
2045
2046static int
2047bge_dma_alloc(device_t dev)
2048{
2049 struct bge_dmamap_arg ctx;
2050 struct bge_softc *sc;
2051 int i, error;
2052
2053 sc = device_get_softc(dev);
2054
2055 /*
2056 * Allocate the parent bus DMA tag appropriate for PCI.
2057 */
2058 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2059 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2060 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2061 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2062
2063 if (error != 0) {
2064 device_printf(sc->bge_dev,
2065 "could not allocate parent dma tag\n");
2066 return (ENOMEM);
2067 }
2068
2069 /*
2070 * Create tag for mbufs.
2071 */
2072 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
2073 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2074 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
2075 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
2076
2077 if (error) {
2078 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2079 return (ENOMEM);
2080 }
2081
2082 /* Create DMA maps for RX buffers. */
2083 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2084 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
2085 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2086 if (error) {
2087 device_printf(sc->bge_dev,
2088 "can't create DMA map for RX\n");
2089 return (ENOMEM);
2090 }
2091 }
2092
2093 /* Create DMA maps for TX buffers. */
2094 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2095 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
2096 &sc->bge_cdata.bge_tx_dmamap[i]);
2097 if (error) {
2098 device_printf(sc->bge_dev,
2099 "can't create DMA map for RX\n");
2100 return (ENOMEM);
2101 }
2102 }
2103
2104 /* Create tag for standard RX ring. */
2105 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2106 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2107 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
2108 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
2109
2110 if (error) {
2111 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2112 return (ENOMEM);
2113 }
2114
2115 /* Allocate DMA'able memory for standard RX ring. */
2116 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
2117 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
2118 &sc->bge_cdata.bge_rx_std_ring_map);
2119 if (error)
2120 return (ENOMEM);
2121
2122 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
2123
2124 /* Load the address of the standard RX ring. */
2125 ctx.bge_maxsegs = 1;
2126 ctx.sc = sc;
2127
2128 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
2129 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
2130 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2131
2132 if (error)
2133 return (ENOMEM);
2134
2135 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
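/*
 * The same three-step pattern repeats for each ring below: create a
 * tag sized for the ring, allocate DMA'able memory from it, then load
 * the map. bge_dma_map_addr() is the load callback; it records the
 * single segment's bus address in ctx.bge_busaddr for us to save.
 */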
2136
2137 /* Create tags for jumbo mbufs. */
2138 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2139 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2140 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2141 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2142 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2143 if (error) {
2144 device_printf(sc->bge_dev,
2145 "could not allocate jumbo dma tag\n");
2146 return (ENOMEM);
2147 }
2148
2149 /* Create tag for jumbo RX ring. */
2150 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2151 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2152 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
2153 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
2154
2155 if (error) {
2156 device_printf(sc->bge_dev,
2157 "could not allocate jumbo ring dma tag\n");
2158 return (ENOMEM);
2159 }
2160
2161 /* Allocate DMA'able memory for jumbo RX ring. */
2162 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2163 (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
2164 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2165 &sc->bge_cdata.bge_rx_jumbo_ring_map);
2166 if (error)
2167 return (ENOMEM);
2168
2169 /* Load the address of the jumbo RX ring. */
2170 ctx.bge_maxsegs = 1;
2171 ctx.sc = sc;
2172
2173 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2174 sc->bge_cdata.bge_rx_jumbo_ring_map,
2175 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
2176 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2177
2178 if (error)
2179 return (ENOMEM);
2180
2181 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
2182
2183 /* Create DMA maps for jumbo RX buffers. */
2184 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2185 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2186 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2187 if (error) {
2188 device_printf(sc->bge_dev,
2189 "can't create DMA map for jumbo RX\n");
2190 return (ENOMEM);
2191 }
2192 }
2193
2194 }
2195
2196 /* Create tag for RX return ring. */
2197 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2198 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2199 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
2200 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
2201
2202 if (error) {
2203 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2204 return (ENOMEM);
2205 }
2206
2207 /* Allocate DMA'able memory for RX return ring. */
2208 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
2209 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
2210 &sc->bge_cdata.bge_rx_return_ring_map);
2211 if (error)
2212 return (ENOMEM);
2213
2214 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
2215 BGE_RX_RTN_RING_SZ(sc));
2216
2217 /* Load the address of the RX return ring. */
2218 ctx.bge_maxsegs = 1;
2219 ctx.sc = sc;
2220
2221 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
2222 sc->bge_cdata.bge_rx_return_ring_map,
2223 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
2224 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2225
2226 if (error)
2227 return (ENOMEM);
2228
2229 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2230
2231 /* Create tag for TX ring. */
2232 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2233 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2234 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2235 &sc->bge_cdata.bge_tx_ring_tag);
2236
2237 if (error) {
2238 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2239 return (ENOMEM);
2240 }
2241
2242 /* Allocate DMA'able memory for TX ring. */
2243 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2244 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2245 &sc->bge_cdata.bge_tx_ring_map);
2246 if (error)
2247 return (ENOMEM);
2248
2249 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2250
2251 /* Load the address of the TX ring. */
2252 ctx.bge_maxsegs = 1;
2253 ctx.sc = sc;
2254
2255 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2256 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2257 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2258
2259 if (error)
2260 return (ENOMEM);
2261
2262 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2263
2264 /* Create tag for status block. */
2265 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2266 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2267 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2268 NULL, NULL, &sc->bge_cdata.bge_status_tag);
2269
2270 if (error) {
2271 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2272 return (ENOMEM);
2273 }
2274
2275 /* Allocate DMA'able memory for status block. */
2276 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2277 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2278 &sc->bge_cdata.bge_status_map);
2279 if (error)
2280 return (ENOMEM);
2281
2282 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2283
2284 /* Load the address of the status block. */
2285 ctx.sc = sc;
2286 ctx.bge_maxsegs = 1;
2287
2288 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2289 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2290 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2291
2292 if (error)
2293 return (ENOMEM);
2294
2295 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2296
2297 /* Create tag for statistics block. */
2298 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2299 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2300 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2301 &sc->bge_cdata.bge_stats_tag);
2302
2303 if (error) {
2304 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2305 return (ENOMEM);
2306 }
2307
2308 /* Allocate DMA'able memory for statistics block. */
2309 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2310 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2311 &sc->bge_cdata.bge_stats_map);
2312 if (error)
2313 return (ENOMEM);
2314
2315 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2316
2317 /* Load the address of the statistics block. */
2318 ctx.sc = sc;
2319 ctx.bge_maxsegs = 1;
2320
2321 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2322 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2323 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2324
2325 if (error)
2326 return (ENOMEM);
2327
2328 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2329
2330 return (0);
2331}
2332
2333#if __FreeBSD_version > 602105
2334/*
2335 * Return true if this device has more than one port.
2336 */
2337static int
2338bge_has_multiple_ports(struct bge_softc *sc)
2339{
2340 device_t dev = sc->bge_dev;
2341 u_int b, d, f, fscan, s;
2342
2343 d = pci_get_domain(dev);
2344 b = pci_get_bus(dev);
2345 s = pci_get_slot(dev);
2346 f = pci_get_function(dev);
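/*
 * Scan every other function number at the same domain/bus/slot;
 * if any of them has a device present, this NIC is one port of a
 * multi-port package.
 */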
2347 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2348 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2349 return (1);
2350 return (0);
2351}
2352
2353/*
2354 * Return true if MSI can be used with this device.
2355 */
2356static int
2357bge_can_use_msi(struct bge_softc *sc)
2358{
2359 int can_use_msi = 0;
2360
2361 switch (sc->bge_asicrev) {
2362 case BGE_ASICREV_BCM5714_A0:
2363 case BGE_ASICREV_BCM5714:
2364 /*
2365 * Apparently, MSI doesn't work when these chips are
2366 * configured in single-port mode.
2367 */
2368 if (bge_has_multiple_ports(sc))
2369 can_use_msi = 1;
2370 break;
2371 case BGE_ASICREV_BCM5750:
2372 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2373 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2374 can_use_msi = 1;
2375 break;
2376 default:
2377 if (BGE_IS_575X_PLUS(sc))
2378 can_use_msi = 1;
2379 }
2380 return (can_use_msi);
2381}
2382#endif
2383
2384static int
2385bge_attach(device_t dev)
2386{
2387 struct ifnet *ifp;
2388 struct bge_softc *sc;
2389 uint32_t hwcfg = 0, misccfg;
2390 u_char eaddr[ETHER_ADDR_LEN];
2391 int error, reg, rid, trys;
2392
2393 sc = device_get_softc(dev);
2394 sc->bge_dev = dev;
2395
2396 /*
2397 * Map control/status registers.
2398 */
2399 pci_enable_busmaster(dev);
2400
2401 rid = BGE_PCI_BAR0;
2402 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2403 RF_ACTIVE);
2404
2405 if (sc->bge_res == NULL) {
2406 device_printf(sc->bge_dev, "couldn't map memory\n");
2407 error = ENXIO;
2408 goto fail;
2409 }
2410
2411 /* Save various chip information. */
2412 sc->bge_chipid =
2413 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2414 BGE_PCIMISCCTL_ASICREV;
2415 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2416 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
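/*
 * The ASIC and chip revisions live in the upper bits of the chip id
 * word; the BGE_ASICREV() and BGE_CHIPREV() macros (presumably in
 * if_bgereg.h) simply shift them into place.
 */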
2417
2418 /*
2419 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2420 * 5705 A0 and A1 chips.
2421 */
2422 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2423 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2424 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2425 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
2426 sc->bge_flags |= BGE_FLAG_WIRESPEED;
2427
2428 if (bge_has_eaddr(sc))
2429 sc->bge_flags |= BGE_FLAG_EADDR;
2430
2431 /* Save chipset family. */
2432 switch (sc->bge_asicrev) {
2433 case BGE_ASICREV_BCM5700:
2434 case BGE_ASICREV_BCM5701:
2435 case BGE_ASICREV_BCM5703:
2436 case BGE_ASICREV_BCM5704:
2437 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2438 break;
2439 case BGE_ASICREV_BCM5714_A0:
2440 case BGE_ASICREV_BCM5780:
2441 case BGE_ASICREV_BCM5714:
2442 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2443 /* FALLTHRU */
2444 case BGE_ASICREV_BCM5750:
2445 case BGE_ASICREV_BCM5752:
2446 case BGE_ASICREV_BCM5755:
2447 case BGE_ASICREV_BCM5787:
2448 case BGE_ASICREV_BCM5906:
2449 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2450 /* FALLTHRU */
2451 case BGE_ASICREV_BCM5705:
2452 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2453 break;
2454 }
2455
2456 /* Set various bug flags. */
2457 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2458 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2459 sc->bge_flags |= BGE_FLAG_CRC_BUG;
2460 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2461 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2462 sc->bge_flags |= BGE_FLAG_ADC_BUG;
2463 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2464 sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
2465 if (BGE_IS_5705_PLUS(sc) &&
2466 !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
2467 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2468 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2469 if (sc->bge_chipid != BGE_CHIPID_BCM5722_A0)
2470 sc->bge_flags |= BGE_FLAG_JITTER_BUG;
2471 } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
2472 sc->bge_flags |= BGE_FLAG_BER_BUG;
2473 }
2474
2475
2476 /*
2477 * We could possibly check for BCOM_DEVICEID_BCM5788 in bge_probe()
2478 * but I do not know the DEVICEID for the 5788M.
2479 */
2480 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2481 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2482 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2483 sc->bge_flags |= BGE_FLAG_5788;
2484
2485 /*
2486 * Check if this is a PCI-X or PCI Express device.
2487 */
2488#if __FreeBSD_version > 602101
2489 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2490 /*
2491 * Found a PCI Express capabilities register; this
2492 * must be a PCI Express device.
2493 */
2494 if (reg != 0) {
2495 sc->bge_flags |= BGE_FLAG_PCIE;
2496#else
2497 if (BGE_IS_5705_PLUS(sc)) {
2498 reg = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2499 if ((reg & 0xFF) == BGE_PCIE_CAPID) {
2500 sc->bge_flags |= BGE_FLAG_PCIE;
2501 reg = BGE_PCIE_CAPID;
2502#endif
2503 bge_set_max_readrq(sc, reg);
2504 }
2505 } else {
2506 /*
2507 * Check if the device is in PCI-X Mode.
2508 * (This bit is not valid on PCI Express controllers.)
2509 */
2510 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2511 BGE_PCISTATE_PCI_BUSMODE) == 0)
2512 sc->bge_flags |= BGE_FLAG_PCIX;
2513 }
2514
2515#if __FreeBSD_version > 602105
2516 {
2517 int msicount;
2518
2519 /*
2520 * Allocate the interrupt, using MSI if possible. These devices
2521 * support 8 MSI messages, but only the first one is used in
2522 * normal operation.
2523 */
2524 if (bge_can_use_msi(sc)) {
2525 msicount = pci_msi_count(dev);
2526 if (msicount > 1)
2527 msicount = 1;
2528 } else
2529 msicount = 0;
2530 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2531 rid = 1;
2532 sc->bge_flags |= BGE_FLAG_MSI;
2533 } else
2534 rid = 0;
2535 }
2536#else
2537 rid = 0;
2538#endif
2539
2540 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2541 RF_SHAREABLE | RF_ACTIVE);
2542
2543 if (sc->bge_irq == NULL) {
2544 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2545 error = ENXIO;
2546 goto fail;
2547 }
2548
2549 if (bootverbose)
2550 device_printf(dev,
2551 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2552 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2553 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2554 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2555
2556 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2557
2558 /* Try to reset the chip. */
2559 if (bge_reset(sc)) {
2560 device_printf(sc->bge_dev, "chip reset failed\n");
2561 error = ENXIO;
2562 goto fail;
2563 }
2564
2565 sc->bge_asf_mode = 0;
2566 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2567 == BGE_MAGIC_NUMBER)) {
2568 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2569 & BGE_HWCFG_ASF) {
2570 sc->bge_asf_mode |= ASF_ENABLE;
2571 sc->bge_asf_mode |= ASF_STACKUP;
2572 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2573 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2574 }
2575 }
2576 }
2577
2578 /* Try to reset the chip again the nice way. */
2579 bge_stop_fw(sc);
2580 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2581 if (bge_reset(sc)) {
2582 device_printf(sc->bge_dev, "chip reset failed\n");
2583 error = ENXIO;
2584 goto fail;
2585 }
2586
2587 bge_sig_legacy(sc, BGE_RESET_STOP);
2588 bge_sig_post_reset(sc, BGE_RESET_STOP);
2589
2590 if (bge_chipinit(sc)) {
2591 device_printf(sc->bge_dev, "chip initialization failed\n");
2592 error = ENXIO;
2593 goto fail;
2594 }
2595
2596 error = bge_get_eaddr(sc, eaddr);
2597 if (error) {
2598 device_printf(sc->bge_dev,
2599 "failed to read station address\n");
2600 error = ENXIO;
2601 goto fail;
2602 }
2603
2604 /* 5705 limits RX return ring to 512 entries. */
2605 if (BGE_IS_5705_PLUS(sc))
2606 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2607 else
2608 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2609
2610 if (bge_dma_alloc(dev)) {
2611 device_printf(sc->bge_dev,
2612 "failed to allocate DMA resources\n");
2613 error = ENXIO;
2614 goto fail;
2615 }
2616
2617 /* Set default tuneable values. */
2618 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2619 sc->bge_rx_coal_ticks = 150;
2620 sc->bge_tx_coal_ticks = 150;
2621 sc->bge_rx_max_coal_bds = 10;
2622 sc->bge_tx_max_coal_bds = 10;
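/*
 * These defaults drive the host coalescing engine: the tick values
 * appear to be in microseconds and the max_coal_bds values are
 * buffer-descriptor counts, so an interrupt fires after 150us or 10
 * descriptors, whichever comes first. They can be overridden through
 * the sysctls registered by bge_add_sysctls().
 */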
2623
2624 /* Set up ifnet structure */
2625 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2626 if (ifp == NULL) {
2627 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2628 error = ENXIO;
2629 goto fail;
2630 }
2631 ifp->if_softc = sc;
2632 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2633 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2634 ifp->if_ioctl = bge_ioctl;
2635 ifp->if_start = bge_start;
2636 ifp->if_init = bge_init;
2637 ifp->if_mtu = ETHERMTU;
2638 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2639 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2640 IFQ_SET_READY(&ifp->if_snd);
2641 ifp->if_hwassist = BGE_CSUM_FEATURES;
2642 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2643 IFCAP_VLAN_MTU;
2644#ifdef IFCAP_VLAN_HWCSUM
2645 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2646#endif
2647 ifp->if_capenable = ifp->if_capabilities;
2648#ifdef DEVICE_POLLING
2649 ifp->if_capabilities |= IFCAP_POLLING;
2650#endif
2651
2652 /*
2653 * 5700 B0 chips do not support checksumming correctly due
2654 * to hardware bugs.
2655 */
2656 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2657 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2658 ifp->if_capenable &= ~IFCAP_HWCSUM;
2659 ifp->if_hwassist = 0;
2660 }
2661
2662 /*
2663 * Figure out what sort of media we have by checking the
2664 * hardware config word in the first 32k of NIC internal memory,
2665 * or fall back to examining the EEPROM if necessary.
2666 * Note: on some BCM5700 cards, this value appears to be unset.
2667 * If that's the case, we have to rely on identifying the NIC
2668 * by its PCI subsystem ID, as we do below for the SysKonnect
2669 * SK-9D41.
2670 */
2671 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2672 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2673 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
2674 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2675 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2676 sizeof(hwcfg))) {
2677 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2678 error = ENXIO;
2679 goto fail;
2680 }
2681 hwcfg = ntohl(hwcfg);
2682 }
2683
2684 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2685 sc->bge_flags |= BGE_FLAG_TBI;
2686
2687 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2688 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2689 sc->bge_flags |= BGE_FLAG_TBI;
2690
2691 if (sc->bge_flags & BGE_FLAG_TBI) {
2692 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2693 bge_ifmedia_sts);
2694 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
2695 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2696 0, NULL);
2697 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
2698 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2699 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2700 } else {
2701 /*
2702 * Do transceiver setup and tell the firmware the
2703 * driver is down so we can try to get access to
2704 * probe the PHY if ASF is running. Retry a couple of times
2705 * if we get a conflict with the ASF firmware accessing
2706 * the PHY.
2707 */
2708 trys = 0;
2709 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2710again:
2711 bge_asf_driver_up(sc);
2712
2713 if (mii_phy_probe(dev, &sc->bge_miibus,
2714 bge_ifmedia_upd, bge_ifmedia_sts)) {
2715 if (trys++ < 4) {
2716 device_printf(sc->bge_dev, "Try again\n");
2717 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
2718 BMCR_RESET);
2719 goto again;
2720 }
2721
2722 device_printf(sc->bge_dev, "MII without any PHY!\n");
2723 error = ENXIO;
2724 goto fail;
2725 }
2726
2727 /*
2728 * Now tell the firmware we are going up after probing the PHY
2729 */
2730 if (sc->bge_asf_mode & ASF_STACKUP)
2731 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2732 }
2733
2734 /*
2735 * When using the BCM5701 in PCI-X mode, data corruption has
2736 * been observed in the first few bytes of some received packets.
2737 * Aligning the packet buffer in memory eliminates the corruption.
2738 * Unfortunately, this misaligns the packet payloads. On platforms
2739 * which do not support unaligned accesses, we will realign the
2740 * payloads by copying the received packets.
2741 */
2742 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2743 sc->bge_flags & BGE_FLAG_PCIX)
2744 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2745
2746 /*
2747 * Call MI attach routine.
2748 */
2749 ether_ifattach(ifp, eaddr);
2750 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2751
2752 /*
2753 * Hookup IRQ last.
2754 */
2755#if __FreeBSD_version > 700030
2756 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2757 NULL, bge_intr, sc, &sc->bge_intrhand);
2758#else
2759 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2760 bge_intr, sc, &sc->bge_intrhand);
2761#endif
2762
2763 if (error) {
2764 bge_detach(dev);
2765 device_printf(sc->bge_dev, "couldn't set up irq\n");
     return (error);
2766 }
2767
2768 bge_add_sysctls(sc);
2769
2770 return (0);
2771
2772fail:
2773 bge_release_resources(sc);
2774
2775 return (error);
2776}
2777
2778static int
2779bge_detach(device_t dev)
2780{
2781 struct bge_softc *sc;
2782 struct ifnet *ifp;
2783
2784 sc = device_get_softc(dev);
2785 ifp = sc->bge_ifp;
2786
2787#ifdef DEVICE_POLLING
2788 if (ifp->if_capenable & IFCAP_POLLING)
2789 ether_poll_deregister(ifp);
2790#endif
2791
2792 BGE_LOCK(sc);
2793 bge_stop(sc);
2794 bge_reset(sc);
2795 BGE_UNLOCK(sc);
2796
2797 callout_drain(&sc->bge_stat_ch);
2798
2799 ether_ifdetach(ifp);
2800
2801 if (sc->bge_flags & BGE_FLAG_TBI) {
2802 ifmedia_removeall(&sc->bge_ifmedia);
2803 } else {
2804 bus_generic_detach(dev);
2805 device_delete_child(dev, sc->bge_miibus);
2806 }
2807
2808 bge_release_resources(sc);
2809
2810 return (0);
2811}
2812
2813static void
2814bge_release_resources(struct bge_softc *sc)
2815{
2816 device_t dev;
2817
2818 dev = sc->bge_dev;
2819
2820 if (sc->bge_intrhand != NULL)
2821 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2822
2823 if (sc->bge_irq != NULL)
2824 bus_release_resource(dev, SYS_RES_IRQ,
2825 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
2826
2827#if __FreeBSD_version > 602105
2828 if (sc->bge_flags & BGE_FLAG_MSI)
2829 pci_release_msi(dev);
2830#endif
2831
2832 if (sc->bge_res != NULL)
2833 bus_release_resource(dev, SYS_RES_MEMORY,
2834 BGE_PCI_BAR0, sc->bge_res);
2835
2836 if (sc->bge_ifp != NULL)
2837 if_free(sc->bge_ifp);
2838
2839 bge_dma_free(sc);
2840
2841 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
2842 BGE_LOCK_DESTROY(sc);
2843}
2844
2845static int
2846bge_reset(struct bge_softc *sc)
2847{
2848 device_t dev;
2849 uint32_t cachesize, command, pcistate, reset, val;
2850 void (*write_op)(struct bge_softc *, int, int);
2851 int i;
2852
2853 dev = sc->bge_dev;
2854
2855 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
2856 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2857 if (sc->bge_flags & BGE_FLAG_PCIE)
2858 write_op = bge_writemem_direct;
2859 else
2860 write_op = bge_writemem_ind;
2861 } else
2862 write_op = bge_writereg_ind;
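/*
 * write_op is used for the global reset below: PCI Express parts
 * take direct memory writes, most other chips take indirect memory
 * writes, and the remaining chips (including the 5714 family and
 * the 5906) are reset through indirect register writes instead.
 */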
2863
2864 /* Save some important PCI state. */
2865 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2866 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2867 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2868
2869 pci_write_config(dev, BGE_PCI_MISC_CTL,
2870 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
2871 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
2872
2873 /* Disable fastboot on controllers that support it. */
2874 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2875 sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2876 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2877 if (bootverbose)
2878 device_printf(sc->bge_dev, "Disabling fastboot\n");
2879 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2880 }
2881
2882 /*
2883 * Write the magic number to SRAM at offset 0xB50.
2884 * When firmware finishes its initialization it will
2885 * write ~BGE_MAGIC_NUMBER to the same location.
2886 */
2887 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2888
2889 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
2890
2891 /* XXX: Broadcom Linux driver. */
2892 if (sc->bge_flags & BGE_FLAG_PCIE) {
2893 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
2894 CSR_WRITE_4(sc, 0x7E2C, 0x20);
2895 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2896 /* Prevent PCIE link training during global reset */
2897 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
2898 reset |= 1 << 29;
2899 }
2900 }
2901
2902 /*
2903 * Set GPHY Power Down Override to leave GPHY
2904 * powered up in D0 uninitialized.
2905 */
2906 if (BGE_IS_5705_PLUS(sc))
2907 reset |= 0x04000000;
2908
2909 /* Issue global reset */
2910 write_op(sc, BGE_MISC_CFG, reset);
2911
2912 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2913 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2914 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2915 val | BGE_VCPU_STATUS_DRV_RESET);
2916 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2917 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2918 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2919 }
2920
2921 DELAY(1000);
2922
2923 /* XXX: Broadcom Linux driver. */
2924 if (sc->bge_flags & BGE_FLAG_PCIE) {
2925 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2926 DELAY(500000); /* wait for link training to complete */
2927 val = pci_read_config(dev, 0xC4, 4);
2928 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
2929 }
2930 /*
2931 * Set PCIE max payload size to 128 bytes and clear error
2932 * status.
2933 */
2934 pci_write_config(dev, 0xD8, 0xF5000, 4);
2935 }
2936
2937 /* Reset some of the PCI state that got zapped by reset. */
2938 pci_write_config(dev, BGE_PCI_MISC_CTL,
2939 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
2940 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
2941 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2942 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2943 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
2944
2945 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
2946 if (BGE_IS_5714_FAMILY(sc)) {
2947 /* This chip disables MSI on reset. */
2948 if (sc->bge_flags & BGE_FLAG_MSI) {
2949 val = pci_read_config(dev, BGE_PCI_MSI_CTL, 2);
2950 pci_write_config(dev, BGE_PCI_MSI_CTL,
2951 val | PCIM_MSICTRL_MSI_ENABLE, 2);
2952 val = CSR_READ_4(sc, BGE_MSI_MODE);
2953 CSR_WRITE_4(sc, BGE_MSI_MODE,
2954 val | BGE_MSIMODE_ENABLE);
2955 }
2956 val = CSR_READ_4(sc, BGE_MARB_MODE);
2957 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2958 } else
2959 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2960
2961 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2962 for (i = 0; i < BGE_TIMEOUT; i++) {
2963 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2964 if (val & BGE_VCPU_STATUS_INIT_DONE)
2965 break;
2966 DELAY(100);
2967 }
2968 if (i == BGE_TIMEOUT) {
2969 device_printf(sc->bge_dev, "reset timed out\n");
2970 return (1);
2971 }
2972 } else {
2973 /*
2974 * Poll until we see the 1's complement of the magic number.
2975 * This indicates that the firmware initialization is complete.
2976 * We expect this to fail, though, if no chip containing
2977 * the Ethernet address is fitted.
2978 */
2979 for (i = 0; i < BGE_TIMEOUT; i++) {
2980 DELAY(10);
2981 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2982 if (val == ~BGE_MAGIC_NUMBER)
2983 break;
2984 }
2985
2986 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
2987 device_printf(sc->bge_dev, "firmware handshake timed out, "
2988 "found 0x%08x\n", val);
2989 }
2990
2991 /*
2992 * XXX Wait for the value of the PCISTATE register to
2993 * return to its original pre-reset state. This is a
2994 * fairly good indicator of reset completion. If we don't
2995 * wait for the reset to fully complete, trying to read
2996 * from the device's non-PCI registers may yield garbage
2997 * results.
2998 */
2999 for (i = 0; i < BGE_TIMEOUT; i++) {
3000 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3001 break;
3002 DELAY(10);
3003 }
3004
3005 if (sc->bge_flags & BGE_FLAG_PCIE) {
3006 reset = bge_readmem_ind(sc, 0x7C00);
3007 bge_writemem_ind(sc, 0x7C00, reset | (1 << 25));
3008 }
3009
3010 /* Fix up byte swapping. */
3011 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3012 BGE_MODECTL_BYTESWAP_DATA);
3013
3014 /* Tell the ASF firmware we are up */
3015 if (sc->bge_asf_mode & ASF_STACKUP)
3016 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3017
3018 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3019
3020 /*
3021 * The 5704 in TBI mode apparently needs some special
3022 * adjustment to ensure the SERDES drive level is set
3023 * to 1.2V.
3024 */
3025 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3026 sc->bge_flags & BGE_FLAG_TBI) {
3027 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3028 val = (val & ~0xFFF) | 0x880;
3029 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3030 }
3031
3032 /* XXX: Broadcom Linux driver. */
3033 if (sc->bge_flags & BGE_FLAG_PCIE &&
3034 sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3035 val = CSR_READ_4(sc, 0x7C00);
3036 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3037 }
3038 DELAY(10000);
3039
3040 return (0);
3041}
3042
3043/*
3044 * Frame reception handling. This is called if there's a frame
3045 * on the receive return list.
3046 *
3047 * Note: we have to be able to handle two possibilities here:
3048 * 1) the frame is from the jumbo receive ring
3049 * 2) the frame is from the standard receive ring
3050 */
3051
3052static void
3053bge_rxeof(struct bge_softc *sc)
3054{
3055 struct ifnet *ifp;
3056 int stdcnt = 0, jumbocnt = 0;
3057
3058 BGE_LOCK_ASSERT(sc);
3059
3060 /* Nothing to do. */
3061 if (sc->bge_rx_saved_considx ==
3062 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
3063 return;
3064
3065 ifp = sc->bge_ifp;
3066
3067 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3068 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3069 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3070 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
3071 if (BGE_IS_JUMBO_CAPABLE(sc))
3072 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3073 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);
3074
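/*
 * Walk the return ring from our saved consumer index up to the
 * producer index the chip posted in the status block, replacing each
 * consumed mbuf with a fresh one (or recycling the old one on error)
 * before passing the packet up the stack.
 */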
3075 while (sc->bge_rx_saved_considx !=
3076 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
3077 struct bge_rx_bd *cur_rx;
3078 uint32_t rxidx;
3079 struct mbuf *m = NULL;
3080 uint16_t vlan_tag = 0;
3081 int have_tag = 0;
3082
3083#ifdef DEVICE_POLLING
3084 if (ifp->if_capenable & IFCAP_POLLING) {
3085 if (sc->rxcycles <= 0)
3086 break;
3087 sc->rxcycles--;
3088 }
3089#endif
3090
3091 cur_rx =
3092 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
3093
3094 rxidx = cur_rx->bge_idx;
3095 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
3096
3097 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3098 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3099 have_tag = 1;
3100 vlan_tag = cur_rx->bge_vlan_tag;
3101 }
3102
3103 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3104 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3105 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
3106 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
3107 BUS_DMASYNC_POSTREAD);
3108 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
3109 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
3110 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3111 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
3112 jumbocnt++;
3113 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3114 ifp->if_ierrors++;
3115 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
3116 continue;
3117 }
3118 if (bge_newbuf_jumbo(sc,
3119 sc->bge_jumbo, NULL) == ENOBUFS) {
3120 ifp->if_ierrors++;
3121 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
3122 continue;
3123 }
3124 } else {
3125 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3126 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
3127 sc->bge_cdata.bge_rx_std_dmamap[rxidx],
3128 BUS_DMASYNC_POSTREAD);
3129 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
3130 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
3131 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3132 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
3133 stdcnt++;
3134 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3135 ifp->if_ierrors++;
3136 bge_newbuf_std(sc, sc->bge_std, m);
3137 continue;
3138 }
3139 if (bge_newbuf_std(sc, sc->bge_std,
3140 NULL) == ENOBUFS) {
3141 ifp->if_ierrors++;
3142 bge_newbuf_std(sc, sc->bge_std, m);
3143 continue;
3144 }
3145 }
3146
3147 ifp->if_ipackets++;
3148#ifndef __NO_STRICT_ALIGNMENT
3149 /*
3150 * For architectures with strict alignment we must make sure
3151 * the payload is aligned.
3152 */
3153 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3154 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3155 cur_rx->bge_len);
3156 m->m_data += ETHER_ALIGN;
3157 }
3158#endif
3159 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3160 m->m_pkthdr.rcvif = ifp;
3161
3162 if (ifp->if_capenable & IFCAP_RXCSUM) {
3163 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3164 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3165 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3166 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3167 }
3168 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3169 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3170 m->m_pkthdr.csum_data =
3171 cur_rx->bge_tcp_udp_csum;
3172 m->m_pkthdr.csum_flags |=
3173 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
3174 }
3175 }
3176
3177 /*
3178 * If we received a packet with a vlan tag,
3179 * attach that information to the packet.
3180 */
3181 if (have_tag) {
3182#if __FreeBSD_version > 700022
3183 m->m_pkthdr.ether_vtag = vlan_tag;
3184 m->m_flags |= M_VLANTAG;
3185#else
3186 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3187 if (m == NULL)
3188 continue;
3189#endif
3190 }
3191
3192 BGE_UNLOCK(sc);
3193 (*ifp->if_input)(ifp, m);
3194 BGE_LOCK(sc);
3195 }
3196
3197 if (stdcnt > 0)
3198 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3199 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3200
3201 if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
3202 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3203 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3204
3205 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3206 if (stdcnt)
3207 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
3208 if (jumbocnt)
3209 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
3210#ifdef notyet
3211 /*
3212 * This register wraps very quickly under heavy packet drops.
3213 * If you need correct statistics, you can enable this check.
3214 */
3215 if (BGE_IS_5705_PLUS(sc))
3216 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3217#endif
3218}
3219
3220static void
3221bge_txeof(struct bge_softc *sc)
3222{
3223 struct bge_tx_bd *cur_tx = NULL;
3224 struct ifnet *ifp;
3225
3226 BGE_LOCK_ASSERT(sc);
3227
3228 /* Nothing to do. */
3229 if (sc->bge_tx_saved_considx ==
3230 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
3231 return;
3232
3233 ifp = sc->bge_ifp;
3234
3235 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3236 sc->bge_cdata.bge_tx_ring_map,
3237 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3238 /*
3239 * Go through our tx ring and free mbufs for those
3240 * frames that have been sent.
3241 */
3242 while (sc->bge_tx_saved_considx !=
3243 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
3244 uint32_t idx = 0;
3245
3246 idx = sc->bge_tx_saved_considx;
3247 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3248 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3249 ifp->if_opackets++;
3250 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3251 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
3252 sc->bge_cdata.bge_tx_dmamap[idx],
3253 BUS_DMASYNC_POSTWRITE);
3254 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
3255 sc->bge_cdata.bge_tx_dmamap[idx]);
3256 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3257 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3258 }
3259 sc->bge_txcnt--;
3260 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3261 }
3262
3263 if (cur_tx != NULL)
3264 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3265 if (sc->bge_txcnt == 0)
3266 sc->bge_timer = 0;
3267}
3268
3269#ifdef DEVICE_POLLING
3270static void
3271bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3272{
3273 struct bge_softc *sc = ifp->if_softc;
3274 uint32_t statusword;
3275
3276 BGE_LOCK(sc);
3277 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3278 BGE_UNLOCK(sc);
3279 return;
3280 }
3281
3282 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3283 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
3284
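/*
 * Read and clear the status word atomically so an update posted
 * by the chip between polls is observed exactly once.
 */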
3285 statusword = atomic_readandclear_32(
3286 &sc->bge_ldata.bge_status_block->bge_status);
3287
3288 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3289 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3290
3291 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3292 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3293 sc->bge_link_evt++;
3294
3295 if (cmd == POLL_AND_CHECK_STATUS)
3296 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3297 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3298 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3299 bge_link_upd(sc);
3300
3301 sc->rxcycles = count;
3302 bge_rxeof(sc);
3303 bge_txeof(sc);
3304 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3305 bge_start_locked(ifp);
3306
3307 BGE_UNLOCK(sc);
3308}
3309#endif /* DEVICE_POLLING */
3310
3311static void
3312bge_intr(void *xsc)
3313{
3314 struct bge_softc *sc;
3315 struct ifnet *ifp;
3316 uint32_t statusword;
3317
3318 sc = xsc;
3319
3320 BGE_LOCK(sc);
3321
3322 ifp = sc->bge_ifp;
3323
3324#ifdef DEVICE_POLLING
3325 if (ifp->if_capenable & IFCAP_POLLING) {
3326 BGE_UNLOCK(sc);
3327 return;
3328 }
3329#endif
3330
3331 /*
3332 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3333 * disable interrupts by writing nonzero like we used to, since with
3334 * our current organization this just gives complications and
3335 * pessimizations for re-enabling interrupts. We used to have races
3336 * instead of the necessary complications. Disabling interrupts
3337 * would just reduce the chance of a status update while we are
3338 * running (by switching to the interrupt-mode coalescence
3339 * parameters), but this chance is already very low so it is more
3340 * efficient to get another interrupt than prevent it.
3341 *
3342 * We do the ack first to ensure another interrupt if there is a
3343 * status update after the ack. We don't check for the status
3344 * changing later because it is more efficient to get another
3345 * interrupt than prevent it, not quite as above (not checking is
3346 * a smaller optimization than not toggling the interrupt enable,
3347 * since checking doesn't involve PCI accesses and toggling requires
3348 * the status check). So toggling would probably be a pessimization
3349 * even with MSI. It would only be needed for using a task queue.
3350 */
3351 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3352
3353 /*
3354 * Do the mandatory PCI flush as well as get the link status.
3355 */
3356 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3357
3358 /* Make sure the descriptor ring indexes are coherent. */
3359 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3360 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
3361 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3362 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3363
3364 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3365 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3366 statusword || sc->bge_link_evt)
3367 bge_link_upd(sc);
3368
3369 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3370 /* Check RX return ring producer/consumer. */
3371 bge_rxeof(sc);
3372
3373 /* Check TX ring producer/consumer. */
3374 bge_txeof(sc);
3375 }
3376
3377 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3378 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3379 bge_start_locked(ifp);
3380
3381 BGE_UNLOCK(sc);
3382}
3383
3384static void
3385bge_asf_driver_up(struct bge_softc *sc)
3386{
3387 if (sc->bge_asf_mode & ASF_STACKUP) {
3388 /* Send ASF heartbeat approx. every 2s. */
3389 if (sc->bge_asf_count)
3390 sc->bge_asf_count--;
3391 else {
3392 sc->bge_asf_count = 5;
3393 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3394 BGE_FW_DRV_ALIVE);
3395 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3396 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3397 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3398 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3399 }
3400 }
3401}
3402
3403static void
3404bge_tick(void *xsc)
3405{
3406 struct bge_softc *sc = xsc;
3407 struct mii_data *mii = NULL;
3408
3409 BGE_LOCK_ASSERT(sc);
3410
3411 /* Synchronize with possible callout reset/stop. */
3412 if (callout_pending(&sc->bge_stat_ch) ||
3413 !callout_active(&sc->bge_stat_ch))
3414 return;
3415
3416 if (BGE_IS_5705_PLUS(sc))
3417 bge_stats_update_regs(sc);
3418 else
3419 bge_stats_update(sc);
3420
3421 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3422 mii = device_get_softc(sc->bge_miibus);
3423 /*
3424 * Do not touch PHY if we have link up. This could break
3425 * IPMI/ASF mode or produce extra input errors
3426 * (extra errors were reported for bcm5701 & bcm5704).
3427 */
3428 if (!sc->bge_link)
3429 mii_tick(mii);
3430 } else {
3431 /*
3432 * Since auto-polling can't be used in TBI mode, we poll
3433 * link status manually. Here we register a pending link
3434 * event and trigger an interrupt.
3435 */
3436#ifdef DEVICE_POLLING
3437 /* In polling mode we poll link state in bge_poll(). */
3438 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3439#endif
3440 {
3441 sc->bge_link_evt++;
3442 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3443 sc->bge_flags & BGE_FLAG_5788)
3444 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3445 else
3446 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3447 }
3448 }
3449
3450 bge_asf_driver_up(sc);
3451 bge_watchdog(sc);
3452
3453 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3454}
3455
3456static void
3457bge_stats_update_regs(struct bge_softc *sc)
3458{
3459 struct ifnet *ifp;
3460
3461 ifp = sc->bge_ifp;
3462
3463 ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3464 offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3465
3466 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3467}
3468
3469static void
3470bge_stats_update(struct bge_softc *sc)
3471{
3472 struct ifnet *ifp;
3473 bus_size_t stats;
3474 uint32_t cnt; /* current register value */
3475
3476 ifp = sc->bge_ifp;
3477
3478 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3479
3480#define READ_STAT(sc, stats, stat) \
3481 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
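/*
 * READ_STAT fetches the low 32 bits of a 64-bit hardware counter
 * through the memory window. The counters only ever increase, so the
 * code below remembers the last value seen and folds the unsigned
 * difference into the ifnet statistics.
 */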
3482
3483 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3484 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
3485 sc->bge_tx_collisions = cnt;
3486
3487 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3488 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
3489 sc->bge_rx_discards = cnt;
3490
3491 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3492 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
3493 sc->bge_tx_discards = cnt;
3494
3495#undef READ_STAT
3496}
3497
3498/*
3499 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3500 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3501 * but when such padded frames employ the bge IP/TCP checksum offload,
3502 * the hardware checksum assist gives incorrect results (possibly
3503 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3504 * If we pad such runts with zeros, the onboard checksum comes out correct.
3505 */
3506static __inline int
3507bge_cksum_pad(struct mbuf *m)
3508{
3509 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3510 struct mbuf *last;
3511
3512 /* If there's only the packet-header and we can pad there, use it. */
3513 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
3514 M_TRAILINGSPACE(m) >= padlen) {
3515 last = m;
3516 } else {
3517 /*
3518 * Walk packet chain to find last mbuf. We will either
3519 * pad there, or append a new mbuf and pad it.
3520 */
3521 for (last = m; last->m_next != NULL; last = last->m_next);
3522 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
3523 /* Allocate new empty mbuf, pad it. Compact later. */
3524 struct mbuf *n;
3525
3526 MGET(n, M_DONTWAIT, MT_DATA);
3527 if (n == NULL)
3528 return (ENOBUFS);
3529 n->m_len = 0;
3530 last->m_next = n;
3531 last = n;
3532 }
3533 }
3534
3535 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
3536 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3537 last->m_len += padlen;
3538 m->m_pkthdr.len += padlen;
3539
3540 return (0);
3541}
3542
3543/*
3544 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3545 * pointers to descriptors.
3546 */
3547static int
3548bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
3549{
3550 bus_dma_segment_t segs[BGE_NSEG_NEW];
3551 bus_dmamap_t map;
3552 struct bge_tx_bd *d;
3553 struct mbuf *m = *m_head;
3554 uint32_t idx = *txidx;
3555 uint16_t csum_flags;
3556 int nsegs, i, error;
3557
3558 csum_flags = 0;
3559 if (m->m_pkthdr.csum_flags) {
3560 if (m->m_pkthdr.csum_flags & CSUM_IP)
3561 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3562 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
3563 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3564 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
3565 (error = bge_cksum_pad(m)) != 0) {
3566 m_freem(m);
3567 *m_head = NULL;
3568 return (error);
3569 }
3570 }
3571 if (m->m_flags & M_LASTFRAG)
3572 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3573 else if (m->m_flags & M_FRAG)
3574 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3575 }
3576
3577 map = sc->bge_cdata.bge_tx_dmamap[idx];
3578 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs,
3579 &nsegs, BUS_DMA_NOWAIT);
3580 if (error == EFBIG) {
3581 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
3582 if (m == NULL) {
3583 m_freem(*m_head);
3584 *m_head = NULL;
3585 return (ENOBUFS);
3586 }
3587 *m_head = m;
3588 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m,
3589 segs, &nsegs, BUS_DMA_NOWAIT);
3590 if (error) {
3591 m_freem(m);
3592 *m_head = NULL;
3593 return (error);
3594 }
3595 } else if (error != 0)
3596 return (error);
3597
3598 /*
3599 * Sanity check: avoid coming within 16 descriptors
3600 * of the end of the ring.
3601 */
3602 if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3603 bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
3604 return (ENOBUFS);
3605 }
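/*
 * Every descriptor of the frame carries the same checksum flags;
 * only the last segment's descriptor gets the END flag, and only
 * the first carries the VLAN tag (both set further below).
 */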
3606
3607 bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
3608
3609 for (i = 0; ; i++) {
3610 d = &sc->bge_ldata.bge_tx_ring[idx];
3611 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3612 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3613 d->bge_len = segs[i].ds_len;
3614 d->bge_flags = csum_flags;
3615 if (i == nsegs - 1)
3616 break;
3617 BGE_INC(idx, BGE_TX_RING_CNT);
3618 }
3619
3620 /* Mark the last segment as end of packet... */
3621 d->bge_flags |= BGE_TXBDFLAG_END;
3622
3623 /* ... and put VLAN tag into first segment. */
3624 d = &sc->bge_ldata.bge_tx_ring[*txidx];
3625#if __FreeBSD_version > 700022
3626 if (m->m_flags & M_VLANTAG) {
3627 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3628 d->bge_vlan_tag = m->m_pkthdr.ether_vtag;
3629 } else
3630 d->bge_vlan_tag = 0;
3631#else
3632 {
3633 struct m_tag *mtag;
3634
3635 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
3636 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3637 d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
3638 } else
3639 d->bge_vlan_tag = 0;
3640 }
3641#endif
3642
3643 /*
3644 * Ensure that the map for this transmission is placed at
3645 * the array index of the last descriptor in this chain,
3646 * since that is where bge_txeof() will look for it.
3647 */
3648 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3649 sc->bge_cdata.bge_tx_dmamap[idx] = map;
3650 sc->bge_cdata.bge_tx_chain[idx] = m;
3651 sc->bge_txcnt += nsegs;
3652
3653 BGE_INC(idx, BGE_TX_RING_CNT);
3654 *txidx = idx;
3655
3656 return (0);
3657}
3658
3659/*
3660 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3661 * to the mbuf data regions directly in the transmit descriptors.
3662 */
3663static void
3664bge_start_locked(struct ifnet *ifp)
3665{
3666 struct bge_softc *sc;
3667 struct mbuf *m_head = NULL;
3668 uint32_t prodidx;
3669 int count = 0;
3670
3671 sc = ifp->if_softc;
3672
3673 if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3674 return;
3675
3676 prodidx = sc->bge_tx_prodidx;
3677
3678 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3679 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3680 if (m_head == NULL)
3681 break;
3682
3683 /*
3684 * XXX
3685 * The code inside the if() block is never reached since we
3686 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3687 * requests to checksum TCP/UDP in a fragmented packet.
3688 *
3689 * XXX
3690 * safety overkill. If this is a fragmented packet chain
3691 * with delayed TCP/UDP checksums, then only encapsulate
3692 * it if we have enough descriptors to handle the entire
3693 * chain at once.
3694 * (paranoia -- may not actually be needed)
3695 */
3696 if (m_head->m_flags & M_FIRSTFRAG &&
3697 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3698 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3699 m_head->m_pkthdr.csum_data + 16) {
3700 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3701 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3702 break;
3703 }
3704 }
3705
3706 /*
3707 * Pack the data into the transmit ring. If we
3708 * don't have room, set the OACTIVE flag and wait
3709 * for the NIC to drain the ring.
3710 */
3711 if (bge_encap(sc, &m_head, &prodidx)) {
3712 if (m_head == NULL)
3713 break;
3714 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3715 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3716 break;
3717 }
3718 ++count;
3719
3720 /*
3721 * If there's a BPF listener, bounce a copy of this frame
3722 * to him.
3723 */
3724#ifdef ETHER_BPF_MTAP
3725 ETHER_BPF_MTAP(ifp, m_head);
3726#else
3727 BPF_MTAP(ifp, m_head);
3728#endif
3729 }
3730
3731 if (count == 0)
3732 /* No packets were dequeued. */
3733 return;
3734
3735 /* Transmit. */
3736 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3737 /* 5700 b2 errata: the producer index must be written twice. */
3738 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3739 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3740
3741 sc->bge_tx_prodidx = prodidx;
3742
3743 /*
3744 * Set a timeout in case the chip goes out to lunch.
3745 */
3746 sc->bge_timer = 5;
3747}
3748
3749/*
3750 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3751 * to the mbuf data regions directly in the transmit descriptors.
3752 */
3753static void
3754bge_start(struct ifnet *ifp)
3755{
3756 struct bge_softc *sc;
3757
3758 sc = ifp->if_softc;
3759 BGE_LOCK(sc);
3760 bge_start_locked(ifp);
3761 BGE_UNLOCK(sc);
3762}
3763
3764static void
3765bge_init_locked(struct bge_softc *sc)
3766{
3767 struct ifnet *ifp;
3768 uint16_t *m;
3769
3770 BGE_LOCK_ASSERT(sc);
3771
3772 ifp = sc->bge_ifp;
3773
3774 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3775 return;
3776
3777 /* Cancel pending I/O and flush buffers. */
3778 bge_stop(sc);
3779
3780 bge_stop_fw(sc);
3781 bge_sig_pre_reset(sc, BGE_RESET_START);
3782 bge_reset(sc);
3783 bge_sig_legacy(sc, BGE_RESET_START);
3784 bge_sig_post_reset(sc, BGE_RESET_START);
3785
3786 bge_chipinit(sc);
3787
3788 /*
3789 * Init the various state machines, ring
3790 * control blocks and firmware.
3791 */
3792 if (bge_blockinit(sc)) {
3793 device_printf(sc->bge_dev, "initialization failure\n");
3794 return;
3795 }
3796
3797 ifp = sc->bge_ifp;
3798
3799 /* Specify MTU. */
3800 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3801 ETHER_HDR_LEN + ETHER_CRC_LEN +
3802 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
3803
3804 /* Load our MAC address. */
3805 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
3806 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3807 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3808
3809 /* Program promiscuous mode. */
3810 bge_setpromisc(sc);
3811
3812 /* Program multicast filter. */
3813 bge_setmulti(sc);
3814
3815 /* Program VLAN tag stripping. */
3816 bge_setvlan(sc);
3817
3818 /* Init RX ring. */
3819 bge_init_rx_ring_std(sc);
3820
3821 /*
3822 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3823 * memory to ensure that the chip has in fact read the first
3824 * entry of the ring.
3825 */
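/*
 * The word read back at offset 8 into the first ring entry is
 * presumably its length/index field, which should read as
 * MCLBYTES - ETHER_ALIGN once the chip has fetched the entry.
 */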
3826 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3827 uint32_t v, i;
3828 for (i = 0; i < 10; i++) {
3829 DELAY(20);
3830 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3831 if (v == (MCLBYTES - ETHER_ALIGN))
3832 break;
3833 }
3834 if (i == 10)
3835 device_printf(sc->bge_dev,
3836 "5705 A0 chip failed to load RX ring\n");
3837 }
3838
3839 /* Init jumbo RX ring. */
3840 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3841 bge_init_rx_ring_jumbo(sc);
3842
3843 /* Init our RX return ring index. */
3844 sc->bge_rx_saved_considx = 0;
3845
3846 /* Init our RX/TX stat counters. */
3847 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
3848
3849 /* Init TX ring. */
3850 bge_init_tx_ring(sc);
3851
3852 /* Turn on transmitter. */
3853 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3854
3855 /* Turn on receiver. */
3856 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3857
3858 /* Tell firmware we're alive. */
3859 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3860
3861#ifdef DEVICE_POLLING
3862 /* Disable interrupts if we are polling. */
3863 if (ifp->if_capenable & IFCAP_POLLING) {
3864 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3865 BGE_PCIMISCCTL_MASK_PCI_INTR);
3866 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3867 } else
3868#endif
3869
3870 /* Enable host interrupts. */
3871 {
3872 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3873 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3874 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3875 }
3876
3877 bge_ifmedia_upd_locked(ifp);
3878
3879 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3880 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3881
3882 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3883}
3884
3885static void
3886bge_init(void *xsc)
3887{
3888 struct bge_softc *sc = xsc;
3889
3890 BGE_LOCK(sc);
3891 bge_init_locked(sc);
3892 BGE_UNLOCK(sc);
3893}
3894
3895/*
3896 * Set media options.
3897 */
3898static int
3899bge_ifmedia_upd(struct ifnet *ifp)
3900{
3901 struct bge_softc *sc = ifp->if_softc;
3902 int res;
3903
3904 BGE_LOCK(sc);
3905 res = bge_ifmedia_upd_locked(ifp);
3906 BGE_UNLOCK(sc);
3907
3908 return (res);
3909}
3910
3911static int
3912bge_ifmedia_upd_locked(struct ifnet *ifp)
3913{
3914 struct bge_softc *sc = ifp->if_softc;
3915 struct mii_data *mii;
3916 struct mii_softc *miisc;
3917 struct ifmedia *ifm;
3918
3919 BGE_LOCK_ASSERT(sc);
3920
3921 ifm = &sc->bge_ifmedia;
3922
3923 /* If this is a 1000baseX NIC, enable the TBI port. */
3924 if (sc->bge_flags & BGE_FLAG_TBI) {
3925 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3926 return (EINVAL);
3927 switch (IFM_SUBTYPE(ifm->ifm_media)) {
3928 case IFM_AUTO:
3929 /*
3930 * The BCM5704 ASIC appears to have a special
3931 * mechanism for programming the autoneg
3932 * advertisement registers in TBI mode.
3933 */
3934 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3935 uint32_t sgdig;
3936 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
3937 if (sgdig & BGE_SGDIGSTS_DONE) {
3938 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3939 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3940 sgdig |= BGE_SGDIGCFG_AUTO |
3941 BGE_SGDIGCFG_PAUSE_CAP |
3942 BGE_SGDIGCFG_ASYM_PAUSE;
3943 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3944 sgdig | BGE_SGDIGCFG_SEND);
3945 DELAY(5);
3946 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3947 }
3948 }
3949 break;
3950 case IFM_1000_SX:
3951 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3952 BGE_CLRBIT(sc, BGE_MAC_MODE,
3953 BGE_MACMODE_HALF_DUPLEX);
3954 } else {
3955 BGE_SETBIT(sc, BGE_MAC_MODE,
3956 BGE_MACMODE_HALF_DUPLEX);
3957 }
3958 break;
3959 default:
3960 return (EINVAL);
3961 }
3962 return (0);
3963 }
3964
3965 sc->bge_link_evt++;
3966 mii = device_get_softc(sc->bge_miibus);
3967 if (mii->mii_instance)
3968 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3969 mii_phy_reset(miisc);
3970 mii_mediachg(mii);
3971
3972 /*
3973 * Force an interrupt so that we will call bge_link_upd
3974 * if needed and clear any pending link state attention.
3975 * Without this we would get no further interrupts for link
3976 * state changes, would never mark the link UP, and so could
3977 * not send in bge_start_locked. The only other way to get
3978 * things working was to receive a packet and take an RX
3979 * interrupt.
3980 * bge_tick should help for fiber cards, and we might not
3981 * need to do this here if BGE_FLAG_TBI is set, but since
3982 * we poll for fiber anyway it should do no harm.
3983 */
3984 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3985 sc->bge_flags & BGE_FLAG_5788)
3986 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3987 else
3988 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3989
3990 return (0);
3991}
3992
3993/*
3994 * Report current media status.
3995 */
3996static void
3997bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3998{
3999 struct bge_softc *sc = ifp->if_softc;
4000 struct mii_data *mii;
4001
4002 BGE_LOCK(sc);
4003
4004 if (sc->bge_flags & BGE_FLAG_TBI) {
4005 ifmr->ifm_status = IFM_AVALID;
4006 ifmr->ifm_active = IFM_ETHER;
4007 if (CSR_READ_4(sc, BGE_MAC_STS) &
4008 BGE_MACSTAT_TBI_PCS_SYNCHED)
4009 ifmr->ifm_status |= IFM_ACTIVE;
4010 else {
4011 ifmr->ifm_active |= IFM_NONE;
4012 BGE_UNLOCK(sc);
4013 return;
4014 }
4015 ifmr->ifm_active |= IFM_1000_SX;
4016 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4017 ifmr->ifm_active |= IFM_HDX;
4018 else
4019 ifmr->ifm_active |= IFM_FDX;
4020 BGE_UNLOCK(sc);
4021 return;
4022 }
4023
4024 mii = device_get_softc(sc->bge_miibus);
4025 mii_pollstat(mii);
4026 ifmr->ifm_active = mii->mii_media_active;
4027 ifmr->ifm_status = mii->mii_media_status;
4028
4029 BGE_UNLOCK(sc);
4030}
4031
4032static int
4033bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4034{
4035 struct bge_softc *sc = ifp->if_softc;
4036 struct ifreq *ifr = (struct ifreq *) data;
4037 struct mii_data *mii;
4038 int flags, mask, error = 0;
4039
4040 switch (command) {
4041 case SIOCSIFMTU:
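		/*
		 * An MTU change requires a full reinit so that bge_init()
		 * can reprogram the chip's RX frame size limits.
		 */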
4042 if (ifr->ifr_mtu < ETHERMIN ||
4043 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4044 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4045 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4046 ifr->ifr_mtu > ETHERMTU))
4047 error = EINVAL;
4048 else if (ifp->if_mtu != ifr->ifr_mtu) {
4049 ifp->if_mtu = ifr->ifr_mtu;
4050 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4051 bge_init(sc);
4052 }
4053 break;
4054 case SIOCSIFFLAGS:
4055 BGE_LOCK(sc);
4056 if (ifp->if_flags & IFF_UP) {
4057 /*
4058 * If only the state of the PROMISC flag changed,
4059 * then just use the 'set promisc mode' command
4060 * instead of reinitializing the entire NIC. Doing
4061 * a full re-init means reloading the firmware and
4062 * waiting for it to start up, which may take a
4063 * second or two. Similarly for ALLMULTI.
4064 */
4065 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4066 flags = ifp->if_flags ^ sc->bge_if_flags;
4067 if (flags & IFF_PROMISC)
4068 bge_setpromisc(sc);
4069 if (flags & IFF_ALLMULTI)
4070 bge_setmulti(sc);
4071 } else
4072 bge_init_locked(sc);
4073 } else {
4074 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4075 bge_stop(sc);
4076 }
4077 }
4078 sc->bge_if_flags = ifp->if_flags;
4079 BGE_UNLOCK(sc);
4080 error = 0;
4081 break;
4082 case SIOCADDMULTI:
4083 case SIOCDELMULTI:
4084 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4085 BGE_LOCK(sc);
4086 bge_setmulti(sc);
4087 BGE_UNLOCK(sc);
4088 error = 0;
4089 }
4090 break;
4091 case SIOCSIFMEDIA:
4092 case SIOCGIFMEDIA:
4093 if (sc->bge_flags & BGE_FLAG_TBI) {
4094 error = ifmedia_ioctl(ifp, ifr,
4095 &sc->bge_ifmedia, command);
4096 } else {
4097 mii = device_get_softc(sc->bge_miibus);
4098 error = ifmedia_ioctl(ifp, ifr,
4099 &mii->mii_media, command);
4100 }
4101 break;
4102 case SIOCSIFCAP:
4103 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4104#ifdef DEVICE_POLLING
4105 if (mask & IFCAP_POLLING) {
4106 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4107 error = ether_poll_register(bge_poll, ifp);
4108 if (error)
4109 return (error);
4110 BGE_LOCK(sc);
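				/*
				 * Mask the chip's PCI interrupt and leave
				 * the IRQ mailbox non-zero so the NIC stays
				 * quiet while polling is active.
				 */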
4111 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4112 BGE_PCIMISCCTL_MASK_PCI_INTR);
4113 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4114 ifp->if_capenable |= IFCAP_POLLING;
4115 BGE_UNLOCK(sc);
4116 } else {
4117 error = ether_poll_deregister(ifp);
4118 /* Enable interrupts even in the error case. */
4119 BGE_LOCK(sc);
4120 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
4121 BGE_PCIMISCCTL_MASK_PCI_INTR);
4122 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4123 ifp->if_capenable &= ~IFCAP_POLLING;
4124 BGE_UNLOCK(sc);
4125 }
4126 }
4127#endif
4128 if (mask & IFCAP_HWCSUM) {
4129 ifp->if_capenable ^= IFCAP_HWCSUM;
4130 if (IFCAP_HWCSUM & ifp->if_capenable &&
4131 IFCAP_HWCSUM & ifp->if_capabilities)
4132 ifp->if_hwassist = BGE_CSUM_FEATURES;
4133 else
4134 ifp->if_hwassist = 0;
4135#ifdef VLAN_CAPABILITIES
4136 VLAN_CAPABILITIES(ifp);
4137#endif
4138 }
4139
4140 if (mask & IFCAP_VLAN_MTU) {
4141 ifp->if_capenable ^= IFCAP_VLAN_MTU;
4142 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4143 bge_init(sc);
4144 }
4145
4146 if (mask & IFCAP_VLAN_HWTAGGING) {
4147 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4148 BGE_LOCK(sc);
4149 bge_setvlan(sc);
4150 BGE_UNLOCK(sc);
4151#ifdef VLAN_CAPABILITIES
4152 VLAN_CAPABILITIES(ifp);
4153#endif
4154 }
4155
4156 break;
4157 default:
4158 error = ether_ioctl(ifp, command, data);
4159 break;
4160 }
4161
4162 return (error);
4163}
4164
4165static void
4166bge_watchdog(struct bge_softc *sc)
4167{
4168 struct ifnet *ifp;
4169
4170 BGE_LOCK_ASSERT(sc);
4171
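	/*
	 * bge_timer is non-zero only while transmissions are pending;
	 * count it down and declare a timeout only when it reaches zero.
	 */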
4172 if (sc->bge_timer == 0 || --sc->bge_timer)
4173 return;
4174
4175 ifp = sc->bge_ifp;
4176
4177 if_printf(ifp, "watchdog timeout -- resetting\n");
4178
4179 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4180 bge_init_locked(sc);
4181
4182 ifp->if_oerrors++;
4183}
4184
4185/*
4186 * Stop the adapter and free any mbufs allocated to the
4187 * RX and TX lists.
4188 */
4189static void
4190bge_stop(struct bge_softc *sc)
4191{
4192 struct ifnet *ifp;
4193 struct ifmedia_entry *ifm;
4194 struct mii_data *mii = NULL;
4195 int mtmp, itmp;
4196
4197 BGE_LOCK_ASSERT(sc);
4198
4199 ifp = sc->bge_ifp;
4200
4201 if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
4202 mii = device_get_softc(sc->bge_miibus);
4203
4204 callout_stop(&sc->bge_stat_ch);
4205
4206 /*
4207 * Disable all of the receiver blocks.
4208 */
4209 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4210 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4211 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4212 if (!(BGE_IS_5705_PLUS(sc)))
4213 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4214 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4215 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4216 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4217
4218 /*
4219 * Disable all of the transmit blocks.
4220 */
4221 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4222 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4223 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4224 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4225 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4226 if (!(BGE_IS_5705_PLUS(sc)))
4227 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4228 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4229
4230 /*
4231 * Shut down all of the memory managers and related
4232 * state machines.
4233 */
4234 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4235 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4236 if (!(BGE_IS_5705_PLUS(sc)))
4237 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4238 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4239 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4240 if (!(BGE_IS_5705_PLUS(sc))) {
4241 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4242 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4243 }
4244
4245 /* Disable host interrupts. */
4246 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4247 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4248
4249 /*
4250 * Tell firmware we're shutting down.
4251 */
4252
4253 bge_stop_fw(sc);
4254 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4255 bge_reset(sc);
4256 bge_sig_legacy(sc, BGE_RESET_STOP);
4257 bge_sig_post_reset(sc, BGE_RESET_STOP);
4258
4259 /*
4260 * Keep the ASF firmware running if up.
4261 */
4262 if (sc->bge_asf_mode & ASF_STACKUP)
4263 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4264 else
4265 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4266
4267 /* Free the RX lists. */
4268 bge_free_rx_ring_std(sc);
4269
4270 /* Free jumbo RX list. */
4271 if (BGE_IS_JUMBO_CAPABLE(sc))
4272 bge_free_rx_ring_jumbo(sc);
4273
4274 /* Free TX buffers. */
4275 bge_free_tx_ring(sc);
4276
4277 /*
4278 * Isolate/power down the PHY, but leave the media selection
4279 * unchanged so that things will be put back to normal when
4280 * we bring the interface back up.
4281 */
4282 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4283 itmp = ifp->if_flags;
4284 ifp->if_flags |= IFF_UP;
4285 /*
4286 * If we are called from bge_detach(), mii is already NULL.
4287 */
4288 if (mii != NULL) {
4289 ifm = mii->mii_media.ifm_cur;
4290 mtmp = ifm->ifm_media;
4291 ifm->ifm_media = IFM_ETHER | IFM_NONE;
4292 mii_mediachg(mii);
4293 ifm->ifm_media = mtmp;
4294 }
4295 ifp->if_flags = itmp;
4296 }
4297
4298 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4299
4300 /* Clear MAC's link state (PHY may still have link UP). */
4301 if (bootverbose && sc->bge_link)
4302 if_printf(sc->bge_ifp, "link DOWN\n");
4303 sc->bge_link = 0;
4304
4305 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4306}
4307
4308/*
4309 * Stop all chip I/O so that the kernel's probe routines don't
4310 * get confused by errant DMAs when rebooting.
4311 */
4312static int
4313bge_shutdown(device_t dev)
4314{
4315 struct bge_softc *sc;
4316
4317 sc = device_get_softc(dev);
4318 BGE_LOCK(sc);
4319 bge_stop(sc);
4320 bge_reset(sc);
4321 BGE_UNLOCK(sc);
4322
4323 return (0);
4324}
4325
4326static int
4327bge_suspend(device_t dev)
4328{
4329 struct bge_softc *sc;
4330
4331 sc = device_get_softc(dev);
4332 BGE_LOCK(sc);
4333 bge_stop(sc);
4334 BGE_UNLOCK(sc);
4335
4336 return (0);
4337}
4338
4339static int
4340bge_resume(device_t dev)
4341{
4342 struct bge_softc *sc;
4343 struct ifnet *ifp;
4344
4345 sc = device_get_softc(dev);
4346 BGE_LOCK(sc);
4347 ifp = sc->bge_ifp;
4348 if (ifp->if_flags & IFF_UP) {
4349 bge_init_locked(sc);
4350 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4351 bge_start_locked(ifp);
4352 }
4353 BGE_UNLOCK(sc);
4354
4355 return (0);
4356}
4357
4358static void
4359bge_link_upd(struct bge_softc *sc)
4360{
4361 struct mii_data *mii;
4362 uint32_t link, status;
4363
4364 BGE_LOCK_ASSERT(sc);
4365
4366 /* Clear 'pending link event' flag. */
4367 sc->bge_link_evt = 0;
4368
4369 /*
4370 * Process link state changes.
4371 * Grrr. The link status word in the status block does
4372 * not work correctly on the BCM5700 rev AX and BX chips,
4373 * according to all available information. Hence, we have
4374 * to enable MII interrupts in order to properly obtain
4375 * async link changes. Unfortunately, this also means that
4376 * we have to read the MAC status register to detect link
4377 * changes, thereby adding an additional register access to
4378 * the interrupt handler.
4379 *
4380 * XXX: perhaps the link state detection procedure used for
4381 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
4382 */
4383
4384 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4385 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
4386 status = CSR_READ_4(sc, BGE_MAC_STS);
4387 if (status & BGE_MACSTAT_MI_INTERRUPT) {
4388 mii = device_get_softc(sc->bge_miibus);
4389 mii_pollstat(mii);
4390 if (!sc->bge_link &&
4391 mii->mii_media_status & IFM_ACTIVE &&
4392 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4393 sc->bge_link++;
4394 if (bootverbose)
4395 if_printf(sc->bge_ifp, "link UP\n");
4396 } else if (sc->bge_link &&
4397 (!(mii->mii_media_status & IFM_ACTIVE) ||
4398 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4399 sc->bge_link = 0;
4400 if (bootverbose)
4401 if_printf(sc->bge_ifp, "link DOWN\n");
4402 }
4403
4404 /* Clear the interrupt. */
4405 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4406 BGE_EVTENB_MI_INTERRUPT);
4407 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4408 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
4409 BRGPHY_INTRS);
4410 }
4411 return;
4412 }
4413
4414 if (sc->bge_flags & BGE_FLAG_TBI) {
4415 status = CSR_READ_4(sc, BGE_MAC_STS);
4416 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4417 if (!sc->bge_link) {
4418 sc->bge_link++;
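				/*
				 * On 5704 parts, stop sending TBI config
				 * frames once the PCS reports sync.
				 */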
4419 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
4420 BGE_CLRBIT(sc, BGE_MAC_MODE,
4421 BGE_MACMODE_TBI_SEND_CFGS);
4422 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4423 if (bootverbose)
4424 if_printf(sc->bge_ifp, "link UP\n");
4425 if_link_state_change(sc->bge_ifp,
4426 LINK_STATE_UP);
4427 }
4428 } else if (sc->bge_link) {
4429 sc->bge_link = 0;
4430 if (bootverbose)
4431 if_printf(sc->bge_ifp, "link DOWN\n");
4432 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
4433 }
4434 } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
4435 /*
4436 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
4437 * bit in the status word always set. Work around this bug by
4438 * reading the PHY link status directly.
4439 */
4440 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
4441
4442 if (link != sc->bge_link ||
4443 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
4444 mii = device_get_softc(sc->bge_miibus);
4445 mii_pollstat(mii);
4446 if (!sc->bge_link &&
4447 mii->mii_media_status & IFM_ACTIVE &&
4448 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4449 sc->bge_link++;
4450 if (bootverbose)
4451 if_printf(sc->bge_ifp, "link UP\n");
4452 } else if (sc->bge_link &&
4453 (!(mii->mii_media_status & IFM_ACTIVE) ||
4454 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4455 sc->bge_link = 0;
4456 if (bootverbose)
4457 if_printf(sc->bge_ifp, "link DOWN\n");
4458 }
4459 }
4460 } else {
4461 /*
4462 * Discard link events for MII/GMII controllers
4463 * if MI auto-polling is disabled.
4464 */
4465 }
4466
4467 /* Clear the attention. */
4468 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4469 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4470 BGE_MACSTAT_LINK_CHANGED);
4471}
4472
4473#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
4474 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
4475 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
4476 desc)
4477
4478static void
4479bge_add_sysctls(struct bge_softc *sc)
4480{
4481 struct sysctl_ctx_list *ctx;
4482 struct sysctl_oid_list *children, *schildren;
4483 struct sysctl_oid *tree;
4484
4485 ctx = device_get_sysctl_ctx(sc->bge_dev);
4486 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
4487
4488#ifdef BGE_REGISTER_DEBUG
4489 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
4490 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
4491 "Debug Information");
4492
4493 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
4494 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
4495 "Register Read");
4496
4497 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
4498 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
4499 "Memory Read");
4500
4501#endif
4502
4503 if (BGE_IS_5705_PLUS(sc))
4504 return;
4505
4506 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4507 NULL, "BGE Statistics");
4508 schildren = children = SYSCTL_CHILDREN(tree);
4509 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
4510 children, COSFramesDroppedDueToFilters,
4511 "FramesDroppedDueToFilters");
4512 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
4513 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
4514 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
4515 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
4516 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
4517 children, nicNoMoreRxBDs, "NoMoreRxBDs");
4518 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
4519 children, ifInDiscards, "InputDiscards");
4520 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
4521 children, ifInErrors, "InputErrors");
4522 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
4523 children, nicRecvThresholdHit, "RecvThresholdHit");
4524 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
4525 children, nicDmaReadQueueFull, "DmaReadQueueFull");
4526 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
4527 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
4528 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
4529 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
4530 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
4531 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
4532 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
4533 children, nicRingStatusUpdate, "RingStatusUpdate");
4534 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
4535 children, nicInterrupts, "Interrupts");
4536 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
4537 children, nicAvoidedInterrupts, "AvoidedInterrupts");
4538 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
4539 children, nicSendThresholdHit, "SendThresholdHit");
4540
4541 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
4542 NULL, "BGE RX Statistics");
4543 children = SYSCTL_CHILDREN(tree);
4544 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
4545 children, rxstats.ifHCInOctets, "Octets");
4546 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
4547 children, rxstats.etherStatsFragments, "Fragments");
4548 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
4549 children, rxstats.ifHCInUcastPkts, "UcastPkts");
4550 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
4551 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
4552 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
4553 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
4554 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
4555 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
4556 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
4557 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
4558 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
4559 children, rxstats.xoffPauseFramesReceived,
4560 "xoffPauseFramesReceived");
4561 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
4562 children, rxstats.macControlFramesReceived,
4563 "ControlFramesReceived");
4564 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
4565 children, rxstats.xoffStateEntered, "xoffStateEntered");
4566 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
4567 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
4568 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
4569 children, rxstats.etherStatsJabbers, "Jabbers");
4570 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
4571 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
4572 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
4573 children, rxstats.inRangeLengthError, "inRangeLengthError");
4574 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
4575 children, rxstats.outRangeLengthError, "outRangeLengthError");
4576
4577 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
4578 NULL, "BGE TX Statistics");
4579 children = SYSCTL_CHILDREN(tree);
4580 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
4581 children, txstats.ifHCOutOctets, "Octets");
4582 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
4583 children, txstats.etherStatsCollisions, "Collisions");
4584 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
4585 children, txstats.outXonSent, "XonSent");
4586 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
4587 children, txstats.outXoffSent, "XoffSent");
4588 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
4589 children, txstats.flowControlDone, "flowControlDone");
4590 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
4591 children, txstats.dot3StatsInternalMacTransmitErrors,
4592 "InternalMacTransmitErrors");
4593 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
4594 children, txstats.dot3StatsSingleCollisionFrames,
4595 "SingleCollisionFrames");
4596 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
4597 children, txstats.dot3StatsMultipleCollisionFrames,
4598 "MultipleCollisionFrames");
4599 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
4600 children, txstats.dot3StatsDeferredTransmissions,
4601 "DeferredTransmissions");
4602 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
4603 children, txstats.dot3StatsExcessiveCollisions,
4604 "ExcessiveCollisions");
4605 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
4606 children, txstats.dot3StatsLateCollisions,
4607 "LateCollisions");
4608 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
4609 children, txstats.ifHCOutUcastPkts, "UcastPkts");
4610 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
4611 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
4612 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
4613 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
4614 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
4615 children, txstats.dot3StatsCarrierSenseErrors,
4616 "CarrierSenseErrors");
4617 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
4618 children, txstats.ifOutDiscards, "Discards");
4619 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
4620 children, txstats.ifOutErrors, "Errors");
4621}
4622
4623static int
4624bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
4625{
4626 struct bge_softc *sc;
4627 uint32_t result;
4628 int offset;
4629
4630 sc = (struct bge_softc *)arg1;
4631 offset = arg2;
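	/*
	 * Each statistic is a 64-bit counter in NIC memory; return the
	 * low 32 bits, read through the PCI memory window.
	 */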
4632 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
4633 offsetof(bge_hostaddr, bge_addr_lo));
4634 return (sysctl_handle_int(oidp, &result, 0, req));
4635}
4636
4637#ifdef BGE_REGISTER_DEBUG
4638static int
4639bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
4640{
4641 struct bge_softc *sc;
4642 uint16_t *sbdata;
4643 int error;
4644 int result;
4645 int i, j;
4646
4647 result = -1;
4648 error = sysctl_handle_int(oidp, &result, 0, req);
4649 if (error || (req->newptr == NULL))
4650 return (error);
4651
4652 if (result == 1) {
4653 sc = (struct bge_softc *)arg1;
4654
4655 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
4656 printf("Status Block:\n");
4657 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
4658 printf("%06x:", i);
4659 for (j = 0; j < 8; j++) {
4660 printf(" %04x", sbdata[i]);
4661 i += 4;
4662 }
4663 printf("\n");
4664 }
4665
4666 printf("Registers:\n");
4667 for (i = 0x800; i < 0xA00; ) {
4668 printf("%06x:", i);
4669 for (j = 0; j < 8; j++) {
4670 printf(" %08x", CSR_READ_4(sc, i));
4671 i += 4;
4672 }
4673 printf("\n");
4674 }
4675
4676 printf("Hardware Flags:\n");
4677 if (BGE_IS_575X_PLUS(sc))
4678 printf(" - 575X Plus\n");
4679 if (BGE_IS_5705_PLUS(sc))
4680 printf(" - 5705 Plus\n");
4681 if (BGE_IS_5714_FAMILY(sc))
4682 printf(" - 5714 Family\n");
4683 if (BGE_IS_5700_FAMILY(sc))
4684 printf(" - 5700 Family\n");
4685 if (sc->bge_flags & BGE_FLAG_JUMBO)
4686 printf(" - Supports Jumbo Frames\n");
4687 if (sc->bge_flags & BGE_FLAG_PCIX)
4688 printf(" - PCI-X Bus\n");
4689 if (sc->bge_flags & BGE_FLAG_PCIE)
4690 printf(" - PCI Express Bus\n");
4691 if (sc->bge_flags & BGE_FLAG_NO_3LED)
4692 printf(" - No 3 LEDs\n");
4693 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
4694 printf(" - RX Alignment Bug\n");
4695 }
4696
4697 return (error);
4698}
4699
4700static int
4701bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
4702{
4703 struct bge_softc *sc;
4704 int error;
4705 int result;
4706 uint32_t val;
4707
4708 result = -1;
4709 error = sysctl_handle_int(oidp, &result, 0, req);
4710 if (error || (req->newptr == NULL))
4711 return (error);
4712
4713 if (result >= 0 && result < 0x8000) {
4714 sc = (struct bge_softc *)arg1;
4715 val = CSR_READ_4(sc, result);
4716 printf("reg 0x%06X = 0x%08X\n", result, val);
4717 }
4718
4719 return (error);
4720}
4721
4722static int
4723bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
4724{
4725 struct bge_softc *sc;
4726 int error;
4727 int result;
4728 uint32_t val;
4729
4730 result = -1;
4731 error = sysctl_handle_int(oidp, &result, 0, req);
4732 if (error || (req->newptr == NULL))
4733 return (error);
4734
4735 if (result >= 0 && result < 0x8000) {
4736 sc = (struct bge_softc *)arg1;
4737 val = bge_readmem_ind(sc, result);
4738 printf("mem 0x%06X = 0x%08X\n", result, val);
4739 }
4740
4741 return (error);
4742}
4743#endif
4744
4745static int
4746bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
4747{
4748
4749 if (sc->bge_flags & BGE_FLAG_EADDR)
4750 return (1);
4751
4752#ifdef __sparc64__
4753 OF_getetheraddr(sc->bge_dev, ether_addr);
4754 return (0);
4755#endif
4756 return (1);
4757}
4758
4759static int
4760bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
4761{
4762 uint32_t mac_addr;
4763
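	/*
	 * The bootcode stamps 0x484b ("HK") into the upper half of this
	 * word when it has left a valid MAC address in NIC SRAM.
	 */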
4764 mac_addr = bge_readmem_ind(sc, 0x0c14);
4765 if ((mac_addr >> 16) == 0x484b) {
4766 ether_addr[0] = (uint8_t)(mac_addr >> 8);
4767 ether_addr[1] = (uint8_t)mac_addr;
4768 mac_addr = bge_readmem_ind(sc, 0x0c18);
4769 ether_addr[2] = (uint8_t)(mac_addr >> 24);
4770 ether_addr[3] = (uint8_t)(mac_addr >> 16);
4771 ether_addr[4] = (uint8_t)(mac_addr >> 8);
4772 ether_addr[5] = (uint8_t)mac_addr;
4773 return (0);
4774 }
4775 return (1);
4776}
4777
4778static int
4779bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
4780{
4781 int mac_offset = BGE_EE_MAC_OFFSET;
4782
4783 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
4784 mac_offset = BGE_EE_MAC_OFFSET_5906;
4785
4786 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
4787 ETHER_ADDR_LEN));
4788}
4789
4790static int
4791bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
4792{
4793
4794 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
4795 return (1);
4796
4797 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4798 ETHER_ADDR_LEN));
4799}
4800
4801static int
4802bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
4803{
4804 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
4805 /* NOTE: Order is critical */
4806 bge_get_eaddr_fw,
4807 bge_get_eaddr_mem,
4808 bge_get_eaddr_nvram,
4809 bge_get_eaddr_eeprom,
4810 NULL
4811 };
4812 const bge_eaddr_fcn_t *func;
4813
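	/* Try each address source in turn; a return value of 0 means success. */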
4814 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
4815 if ((*func)(sc, eaddr) == 0)
4816 break;
4817 }
4818 return (*func == NULL ? ENXIO : 0);
4819}
1411
1412 /* Set the timer prescaler (always 66MHz) */
1413 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1414
1415 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1416 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1417 DELAY(40); /* XXX */
1418
1419 /* Put PHY into ready state */
1420 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1421 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1422 DELAY(40);
1423 }
1424
1425 return (0);
1426}
1427
1428static int
1429bge_blockinit(struct bge_softc *sc)
1430{
1431 struct bge_rcb *rcb;
1432 bus_size_t vrcb;
1433 bge_hostaddr taddr;
1434 uint32_t val;
1435 int i;
1436
1437 /*
1438 * Initialize the memory window pointer register so that
1439 * we can access the first 32K of internal NIC RAM. This will
1440 * allow us to set up the TX send ring RCBs and the RX return
1441 * ring RCBs, plus other things which live in NIC memory.
1442 */
1443 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1444
1445 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1446
1447 if (!(BGE_IS_5705_PLUS(sc))) {
1448 /* Configure mbuf memory pool */
1449 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1450 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1451 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1452 else
1453 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1454
1455 /* Configure DMA resource pool */
1456 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1457 BGE_DMA_DESCRIPTORS);
1458 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1459 }
1460
1461 /* Configure mbuf pool watermarks */
1462 if (!BGE_IS_5705_PLUS(sc)) {
1463 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1464 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1465 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1466 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1467 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1468 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1469 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1470 } else {
1471 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1472 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1473 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1474 }
1475
1476 /* Configure DMA resource watermarks */
1477 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1478 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1479
1480 /* Enable buffer manager */
1481 if (!(BGE_IS_5705_PLUS(sc))) {
1482 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1483 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1484
1485 /* Poll for buffer manager start indication */
1486 for (i = 0; i < BGE_TIMEOUT; i++) {
1487 DELAY(10);
1488 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1489 break;
1490 }
1491
1492 if (i == BGE_TIMEOUT) {
1493 device_printf(sc->bge_dev,
1494 "buffer manager failed to start\n");
1495 return (ENXIO);
1496 }
1497 }
1498
1499 /* Enable flow-through queues */
1500 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1501 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1502
1503 /* Wait until queue initialization is complete */
1504 for (i = 0; i < BGE_TIMEOUT; i++) {
1505 DELAY(10);
1506 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1507 break;
1508 }
1509
1510 if (i == BGE_TIMEOUT) {
1511 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1512 return (ENXIO);
1513 }
1514
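	/*
	 * Each ring is described to the chip by a ring control block
	 * (RCB) giving its host address, maximum frame length/flags
	 * and, where applicable, its location in NIC memory.
	 */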
1515 /* Initialize the standard RX ring control block */
1516 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1517 rcb->bge_hostaddr.bge_addr_lo =
1518 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1519 rcb->bge_hostaddr.bge_addr_hi =
1520 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1521 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1522 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1523 if (BGE_IS_5705_PLUS(sc))
1524 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1525 else
1526 rcb->bge_maxlen_flags =
1527 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1528 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1529 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1530 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1531
1532 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1533 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1534
1535 /*
1536 * Initialize the jumbo RX ring control block.
1537 * We set the 'ring disabled' bit in the flags
1538 * field until we're actually ready to start
1539 * using this ring (i.e. once we set the MTU
1540 * high enough to require it).
1541 */
1542 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1543 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1544
1545 rcb->bge_hostaddr.bge_addr_lo =
1546 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1547 rcb->bge_hostaddr.bge_addr_hi =
1548 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1549 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1550 sc->bge_cdata.bge_rx_jumbo_ring_map,
1551 BUS_DMASYNC_PREREAD);
1552 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1553 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1554 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1555 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1556 rcb->bge_hostaddr.bge_addr_hi);
1557 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1558 rcb->bge_hostaddr.bge_addr_lo);
1559
1560 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1561 rcb->bge_maxlen_flags);
1562 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1563
1564 /* Set up dummy disabled mini ring RCB */
1565 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1566 rcb->bge_maxlen_flags =
1567 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1568 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1569 rcb->bge_maxlen_flags);
1570 }
1571
1572 /*
1573 * Set the BD ring replenish thresholds. The recommended
1574 * values are 1/8th the number of descriptors allocated to
1575 * each ring.
1576 * XXX The 5754 requires a lower threshold, so it might be a
1577 * requirement of all 575x family chips. The Linux driver sets
1578 * the lower threshold for all 5705 family chips as well, but there
1579 * are reports that it might not need to be so strict.
1580 *
1581 * XXX Linux does some extra fiddling here for the 5906 parts as
1582 * well.
1583 */
1584 if (BGE_IS_5705_PLUS(sc))
1585 val = 8;
1586 else
1587 val = BGE_STD_RX_RING_CNT / 8;
1588 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1589 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8);
1590
1591 /*
1592 * Disable all unused send rings by setting the 'ring disabled'
1593 * bit in the flags field of all the TX send ring control blocks.
1594 * These are located in NIC memory.
1595 */
1596 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1597 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1598 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1599 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1600 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1601 vrcb += sizeof(struct bge_rcb);
1602 }
1603
1604 /* Configure TX RCB 0 (we use only the first ring) */
1605 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1606 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1607 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1608 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1609 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1610 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1611 if (!(BGE_IS_5705_PLUS(sc)))
1612 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1613 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1614
1615 /* Disable all unused RX return rings */
1616 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1617 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1618 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1619 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1620 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1621 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1622 BGE_RCB_FLAG_RING_DISABLED));
1623 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1624 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1625 (i * (sizeof(uint64_t))), 0);
1626 vrcb += sizeof(struct bge_rcb);
1627 }
1628
1629 /* Initialize RX ring indexes */
1630 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1631 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1632 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1633
1634 /*
1635 * Set up RX return ring 0.
1636 * Note that the NIC address for RX return rings is 0x00000000.
1637 * The return rings live entirely within the host, so the
1638 * nicaddr field in the RCB isn't used.
1639 */
1640 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1641 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1642 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1643 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1644 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1645 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1646 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1647
1648 /* Set random backoff seed for TX */
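	/* A MAC-derived seed keeps co-located NICs from sharing backoff slots. */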
1649 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1650 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1651 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1652 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1653 BGE_TX_BACKOFF_SEED_MASK);
1654
1655 /* Set inter-packet gap */
1656 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1657
1658 /*
1659 * Specify which ring to use for packets that don't match
1660 * any RX rules.
1661 */
1662 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1663
1664 /*
1665 * Configure number of RX lists. One interrupt distribution
1666 * list, sixteen active lists, one bad frames class.
1667 */
1668 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1669
1670 /* Initialize RX list placement stats mask. */
1671 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1672 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1673
1674 /* Disable host coalescing until we get it set up */
1675 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1676
1677 /* Poll to make sure it's shut down. */
1678 for (i = 0; i < BGE_TIMEOUT; i++) {
1679 DELAY(10);
1680 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1681 break;
1682 }
1683
1684 if (i == BGE_TIMEOUT) {
1685 device_printf(sc->bge_dev,
1686 "host coalescing engine failed to idle\n");
1687 return (ENXIO);
1688 }
1689
1690 /* Set up host coalescing defaults */
1691 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1692 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1693 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1694 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1695 if (!(BGE_IS_5705_PLUS(sc))) {
1696 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1697 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1698 }
1699 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1700 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1701
1702 /* Set up address of statistics block */
1703 if (!(BGE_IS_5705_PLUS(sc))) {
1704 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1705 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1706 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1707 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1708 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1709 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1710 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1711 }
1712
1713 /* Set up address of status block */
1714 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1715 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1716 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1717 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1718 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1719 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1720
1721 /* Turn on host coalescing state machine */
1722 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1723
1724 /* Turn on RX BD completion state machine and enable attentions */
1725 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1726 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1727
1728 /* Turn on RX list placement state machine */
1729 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1730
1731 /* Turn on RX list selector state machine. */
1732 if (!(BGE_IS_5705_PLUS(sc)))
1733 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1734
1735 /* Turn on DMA, clear stats */
1736 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB |
1737 BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR |
1738 BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB |
1739 BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB |
1740 ((sc->bge_flags & BGE_FLAG_TBI) ?
1741 BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1742
1743 /* Set misc. local control, enable interrupts on attentions */
1744 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1745
1746#ifdef notdef
1747 /* Assert GPIO pins for PHY reset */
1748 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1749 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1750 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1751 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1752#endif
1753
1754 /* Turn on DMA completion state machine */
1755 if (!(BGE_IS_5705_PLUS(sc)))
1756 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1757
1758 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1759
1760 /* Enable host coalescing bug fix. */
1761 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1762 sc->bge_asicrev == BGE_ASICREV_BCM5787)
1763 val |= 1 << 29;
1764
1765 /* Turn on write DMA state machine */
1766 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1767 DELAY(40);
1768
1769 /* Turn on read DMA state machine */
1770 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1771 if (sc->bge_flags & BGE_FLAG_PCIE)
1772 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1773 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1774 DELAY(40);
1775
1776 /* Turn on RX data completion state machine */
1777 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1778
1779 /* Turn on RX BD initiator state machine */
1780 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1781
1782 /* Turn on RX data and RX BD initiator state machine */
1783 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1784
1785 /* Turn on Mbuf cluster free state machine */
1786 if (!(BGE_IS_5705_PLUS(sc)))
1787 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1788
1789 /* Turn on send BD completion state machine */
1790 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1791
1792 /* Turn on send data completion state machine */
1793 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1794
1795 /* Turn on send data initiator state machine */
1796 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1797
1798 /* Turn on send BD initiator state machine */
1799 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1800
1801 /* Turn on send BD selector state machine */
1802 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1803
1804 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1805 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1806 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
1807
1808 /* ack/clear link change events */
1809 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1810 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1811 BGE_MACSTAT_LINK_CHANGED);
1812 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1813
1814 /* Enable PHY auto polling (for MII/GMII only) */
1815 if (sc->bge_flags & BGE_FLAG_TBI) {
1816 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1817 } else {
1818 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
1819 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1820 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1821 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1822 BGE_EVTENB_MI_INTERRUPT);
1823 }
1824
1825 /*
1826 * Clear any pending link state attention.
1827 * Otherwise some link state change events may be lost until the
1828 * attention is cleared by the bge_intr() -> bge_link_upd() sequence.
1829 * It's not necessary on newer BCM chips - perhaps enabling link
1830 * state change attentions implies clearing pending attention.
1831 */
1832 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1833 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1834 BGE_MACSTAT_LINK_CHANGED);
1835
1836 /* Enable link state change attentions. */
1837 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1838
1839 return (0);
1840}
1841
1842const struct bge_revision *
1843bge_lookup_rev(uint32_t chipid)
1844{
1845 const struct bge_revision *br;
1846
1847 for (br = bge_revisions; br->br_name != NULL; br++) {
1848 if (br->br_chipid == chipid)
1849 return (br);
1850 }
1851
1852 for (br = bge_majorrevs; br->br_name != NULL; br++) {
1853 if (br->br_chipid == BGE_ASICREV(chipid))
1854 return (br);
1855 }
1856
1857 return (NULL);
1858}
1859
1860const struct bge_vendor *
1861bge_lookup_vendor(uint16_t vid)
1862{
1863 const struct bge_vendor *v;
1864
1865 for (v = bge_vendors; v->v_name != NULL; v++)
1866 if (v->v_id == vid)
1867 return (v);
1868
1869 panic("%s: unknown vendor %d", __func__, vid);
1870 return (NULL);
1871}
1872
1873/*
1874 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1875 * against our list and return its name if we find a match.
1876 *
1877 * Note that since the Broadcom controller contains VPD support, we
1878 * try to get the device name string from the controller itself instead
1879 * of the compiled-in string. This guarantees we'll always announce the
1880 * right product name. We fall back to the compiled-in string when
1881 * VPD is unavailable or corrupt.
1882 */
1883static int
1884bge_probe(device_t dev)
1885{
1886 const struct bge_type *t = bge_devs;
1887 struct bge_softc *sc = device_get_softc(dev);
1888 uint16_t vid, did;
1889
1890 sc->bge_dev = dev;
1891 vid = pci_get_vendor(dev);
1892 did = pci_get_device(dev);
1893 while (t->bge_vid != 0) {
1894 if ((vid == t->bge_vid) && (did == t->bge_did)) {
1895 char model[64], buf[96];
1896 const struct bge_revision *br;
1897 const struct bge_vendor *v;
1898 uint32_t id;
1899
1900 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1901 BGE_PCIMISCCTL_ASICREV;
1902 br = bge_lookup_rev(id);
1903 v = bge_lookup_vendor(vid);
1904 {
1905#if __FreeBSD_version > 700024
1906 const char *pname;
1907
1908 if (bge_has_eaddr(sc) &&
1909 pci_get_vpd_ident(dev, &pname) == 0)
1910 snprintf(model, 64, "%s", pname);
1911 else
1912#endif
1913 snprintf(model, 64, "%s %s",
1914 v->v_name,
1915 br != NULL ? br->br_name :
1916 "NetXtreme Ethernet Controller");
1917 }
1918 snprintf(buf, 96, "%s, %sASIC rev. %#04x", model,
1919 br != NULL ? "" : "unknown ", id >> 16);
1920 device_set_desc_copy(dev, buf);
1921 if (pci_get_subvendor(dev) == DELL_VENDORID)
1922 sc->bge_flags |= BGE_FLAG_NO_3LED;
1923 if (did == BCOM_DEVICEID_BCM5755M)
1924 sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
1925 return (0);
1926 }
1927 t++;
1928 }
1929
1930 return (ENXIO);
1931}
1932
1933static void
1934bge_dma_free(struct bge_softc *sc)
1935{
1936 int i;
1937
1938 /* Destroy DMA maps for RX buffers. */
1939 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1940 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1941 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1942 sc->bge_cdata.bge_rx_std_dmamap[i]);
1943 }
1944
1945 /* Destroy DMA maps for jumbo RX buffers. */
1946 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1947 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1948 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1949 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1950 }
1951
1952 /* Destroy DMA maps for TX buffers. */
1953 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1954 if (sc->bge_cdata.bge_tx_dmamap[i])
1955 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1956 sc->bge_cdata.bge_tx_dmamap[i]);
1957 }
1958
1959 if (sc->bge_cdata.bge_mtag)
1960 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1961
1962
1963 /* Destroy standard RX ring. */
1964 if (sc->bge_cdata.bge_rx_std_ring_map)
1965 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1966 sc->bge_cdata.bge_rx_std_ring_map);
1967 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1968 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1969 sc->bge_ldata.bge_rx_std_ring,
1970 sc->bge_cdata.bge_rx_std_ring_map);
1971
1972 if (sc->bge_cdata.bge_rx_std_ring_tag)
1973 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1974
1975 /* Destroy jumbo RX ring. */
1976 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1977 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1978 sc->bge_cdata.bge_rx_jumbo_ring_map);
1979
1980 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1981 sc->bge_ldata.bge_rx_jumbo_ring)
1982 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1983 sc->bge_ldata.bge_rx_jumbo_ring,
1984 sc->bge_cdata.bge_rx_jumbo_ring_map);
1985
1986 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1987 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1988
1989 /* Destroy RX return ring. */
1990 if (sc->bge_cdata.bge_rx_return_ring_map)
1991 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1992 sc->bge_cdata.bge_rx_return_ring_map);
1993
1994 if (sc->bge_cdata.bge_rx_return_ring_map &&
1995 sc->bge_ldata.bge_rx_return_ring)
1996 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1997 sc->bge_ldata.bge_rx_return_ring,
1998 sc->bge_cdata.bge_rx_return_ring_map);
1999
2000 if (sc->bge_cdata.bge_rx_return_ring_tag)
2001 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2002
2003 /* Destroy TX ring. */
2004 if (sc->bge_cdata.bge_tx_ring_map)
2005 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2006 sc->bge_cdata.bge_tx_ring_map);
2007
2008 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2009 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2010 sc->bge_ldata.bge_tx_ring,
2011 sc->bge_cdata.bge_tx_ring_map);
2012
2013 if (sc->bge_cdata.bge_tx_ring_tag)
2014 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2015
2016 /* Destroy status block. */
2017 if (sc->bge_cdata.bge_status_map)
2018 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2019 sc->bge_cdata.bge_status_map);
2020
2021 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2022 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2023 sc->bge_ldata.bge_status_block,
2024 sc->bge_cdata.bge_status_map);
2025
2026 if (sc->bge_cdata.bge_status_tag)
2027 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2028
2029 /* Destroy statistics block. */
2030 if (sc->bge_cdata.bge_stats_map)
2031 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2032 sc->bge_cdata.bge_stats_map);
2033
2034 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2035 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2036 sc->bge_ldata.bge_stats,
2037 sc->bge_cdata.bge_stats_map);
2038
2039 if (sc->bge_cdata.bge_stats_tag)
2040 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2041
2042 /* Destroy the parent tag. */
2043 if (sc->bge_cdata.bge_parent_tag)
2044 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2045}
2046
2047static int
2048bge_dma_alloc(device_t dev)
2049{
2050 struct bge_dmamap_arg ctx;
2051 struct bge_softc *sc;
2052 int i, error;
2053
2054 sc = device_get_softc(dev);
2055
2056 /*
2057 * Allocate the parent bus DMA tag appropriate for PCI.
2058 */
2059 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2060 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2061 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2062 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2063
2064 if (error != 0) {
2065 device_printf(sc->bge_dev,
2066 "could not allocate parent dma tag\n");
2067 return (ENOMEM);
2068 }
2069
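	/*
	 * All of the tags below are created as children of the parent
	 * tag, so they inherit its addressing constraints.
	 */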
2070 /*
2071 * Create tag for mbufs.
2072 */
2073 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
2074 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2075 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
2076 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
2077
2078 if (error) {
2079 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2080 return (ENOMEM);
2081 }
2082
2083 /* Create DMA maps for RX buffers. */
2084 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2085 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
2086 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2087 if (error) {
2088 device_printf(sc->bge_dev,
2089 "can't create DMA map for RX\n");
2090 return (ENOMEM);
2091 }
2092 }
2093
2094 /* Create DMA maps for TX buffers. */
2095 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2096 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
2097 &sc->bge_cdata.bge_tx_dmamap[i]);
2098 if (error) {
2099 device_printf(sc->bge_dev,
2100 "can't create DMA map for RX\n");
2101 return (ENOMEM);
2102 }
2103 }
2104
2105 /* Create tag for standard RX ring. */
2106 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2107 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2108 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
2109 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
2110
2111 if (error) {
2112 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2113 return (ENOMEM);
2114 }
2115
2116 /* Allocate DMA'able memory for standard RX ring. */
2117 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
2118 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
2119 &sc->bge_cdata.bge_rx_std_ring_map);
2120 if (error)
2121 return (ENOMEM);
2122
2123 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
2124
2125 /* Load the address of the standard RX ring. */
2126 ctx.bge_maxsegs = 1;
2127 ctx.sc = sc;
2128
2129 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
2130 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
2131 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2132
2133 if (error)
2134 return (ENOMEM);
2135
2136 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
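	/*
	 * The same tag/alloc/load pattern repeats for each ring below;
	 * the bus address captured by bge_dma_map_addr() is what later
	 * gets programmed into the chip's RCBs by bge_blockinit().
	 */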
2137
2138 /* Create tags for jumbo mbufs. */
2139 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2140 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2141 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2142 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2143 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2144 if (error) {
2145 device_printf(sc->bge_dev,
2146 "could not allocate jumbo dma tag\n");
2147 return (ENOMEM);
2148 }
2149
2150 /* Create tag for jumbo RX ring. */
2151 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2152 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2153 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
2154 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
2155
2156 if (error) {
2157 device_printf(sc->bge_dev,
2158 "could not allocate jumbo ring dma tag\n");
2159 return (ENOMEM);
2160 }
2161
2162 /* Allocate DMA'able memory for jumbo RX ring. */
2163 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2164 (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
2165 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2166 &sc->bge_cdata.bge_rx_jumbo_ring_map);
2167 if (error)
2168 return (ENOMEM);
2169
2170 /* Load the address of the jumbo RX ring. */
2171 ctx.bge_maxsegs = 1;
2172 ctx.sc = sc;
2173
2174 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2175 sc->bge_cdata.bge_rx_jumbo_ring_map,
2176 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
2177 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2178
2179 if (error)
2180 return (ENOMEM);
2181
2182 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
2183
2184 /* Create DMA maps for jumbo RX buffers. */
2185 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2186 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2187 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2188 if (error) {
2189 device_printf(sc->bge_dev,
2190 "can't create DMA map for jumbo RX\n");
2191 return (ENOMEM);
2192 }
2193 }
2194
2195 }
2196
2197 /* Create tag for RX return ring. */
2198 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2199 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2200 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
2201 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
2202
2203 if (error) {
2204 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2205 return (ENOMEM);
2206 }
2207
2208 /* Allocate DMA'able memory for RX return ring. */
2209 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
2210 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
2211 &sc->bge_cdata.bge_rx_return_ring_map);
2212 if (error)
2213 return (ENOMEM);
2214
2215 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
2216 BGE_RX_RTN_RING_SZ(sc));
2217
2218 /* Load the address of the RX return ring. */
2219 ctx.bge_maxsegs = 1;
2220 ctx.sc = sc;
2221
2222 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
2223 sc->bge_cdata.bge_rx_return_ring_map,
2224 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
2225 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2226
2227 if (error)
2228 return (ENOMEM);
2229
2230 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2231
2232 /* Create tag for TX ring. */
2233 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2234 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2235 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2236 &sc->bge_cdata.bge_tx_ring_tag);
2237
2238 if (error) {
2239 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2240 return (ENOMEM);
2241 }
2242
2243 /* Allocate DMA'able memory for TX ring. */
2244 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2245 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2246 &sc->bge_cdata.bge_tx_ring_map);
2247 if (error)
2248 return (ENOMEM);
2249
2250 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2251
2252 /* Load the address of the TX ring. */
2253 ctx.bge_maxsegs = 1;
2254 ctx.sc = sc;
2255
2256 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2257 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2258 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2259
2260 if (error)
2261 return (ENOMEM);
2262
2263 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2264
2265 /* Create tag for status block. */
2266 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2267 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2268 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2269 NULL, NULL, &sc->bge_cdata.bge_status_tag);
2270
2271 if (error) {
2272 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2273 return (ENOMEM);
2274 }
2275
2276 /* Allocate DMA'able memory for status block. */
2277 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2278 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2279 &sc->bge_cdata.bge_status_map);
2280 if (error)
2281 return (ENOMEM);
2282
2283 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2284
2285 /* Load the address of the status block. */
2286 ctx.sc = sc;
2287 ctx.bge_maxsegs = 1;
2288
2289 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2290 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2291 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2292
2293 if (error)
2294 return (ENOMEM);
2295
2296 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2297
2298 /* Create tag for statistics block. */
2299 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2300 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2301 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2302 &sc->bge_cdata.bge_stats_tag);
2303
2304 if (error) {
2305 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2306 return (ENOMEM);
2307 }
2308
2309 /* Allocate DMA'able memory for statistics block. */
2310 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2311 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2312 &sc->bge_cdata.bge_stats_map);
2313 if (error)
2314 return (ENOMEM);
2315
2316 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2317
2318 /* Load the address of the statistics block. */
2319 ctx.sc = sc;
2320 ctx.bge_maxsegs = 1;
2321
2322 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2323 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2324 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2325
2326 if (error)
2327 return (ENOMEM);
2328
2329 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2330
2331 return (0);
2332}
2333
2334#if __FreeBSD_version > 602105
2335/*
2336 * Return true if this device has more than one port.
2337 */
2338static int
2339bge_has_multiple_ports(struct bge_softc *sc)
2340{
2341 device_t dev = sc->bge_dev;
2342 u_int b, d, f, fscan, s;
2343
2344 d = pci_get_domain(dev);
2345 b = pci_get_bus(dev);
2346 s = pci_get_slot(dev);
2347 f = pci_get_function(dev);
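/*
 * Scan every other function number at the same domain/bus/slot;
 * if any of them exists, this NIC is one port of a multi-port
 * device.
 */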
2348 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2349 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2350 return (1);
2351 return (0);
2352}
2353
2354/*
2355 * Return true if MSI can be used with this device.
2356 */
2357static int
2358bge_can_use_msi(struct bge_softc *sc)
2359{
2360 int can_use_msi = 0;
2361
2362 switch (sc->bge_asicrev) {
2363 case BGE_ASICREV_BCM5714_A0:
2364 case BGE_ASICREV_BCM5714:
2365 /*
2366 * Apparently, MSI doesn't work when these chips are
2367 * configured in single-port mode.
2368 */
2369 if (bge_has_multiple_ports(sc))
2370 can_use_msi = 1;
2371 break;
2372 case BGE_ASICREV_BCM5750:
2373 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2374 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2375 can_use_msi = 1;
2376 break;
2377 default:
2378 if (BGE_IS_575X_PLUS(sc))
2379 can_use_msi = 1;
2380 }
2381 return (can_use_msi);
2382}
2383#endif
2384
2385static int
2386bge_attach(device_t dev)
2387{
2388 struct ifnet *ifp;
2389 struct bge_softc *sc;
2390 uint32_t hwcfg = 0, misccfg;
2391 u_char eaddr[ETHER_ADDR_LEN];
2392 int error, reg, rid, trys;
2393
2394 sc = device_get_softc(dev);
2395 sc->bge_dev = dev;
2396
2397 /*
2398 * Map control/status registers.
2399 */
2400 pci_enable_busmaster(dev);
2401
2402 rid = BGE_PCI_BAR0;
2403 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2404 RF_ACTIVE);
2405
2406 if (sc->bge_res == NULL) {
2407 device_printf(sc->bge_dev, "couldn't map memory\n");
2408 error = ENXIO;
2409 goto fail;
2410 }
2411
2412 /* Save various chip information. */
2413 sc->bge_chipid =
2414 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2415 BGE_PCIMISCCTL_ASICREV;
2416 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2417 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2418
2419 /*
2420 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2421 * 5705 A0 and A1 chips.
2422 */
2423 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2424 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2425 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2426 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
2427 sc->bge_flags |= BGE_FLAG_WIRESPEED;
2428
2429 if (bge_has_eaddr(sc))
2430 sc->bge_flags |= BGE_FLAG_EADDR;
2431
2432 /* Save chipset family. */
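/*
 * Note the deliberate fall-throughs in the switch below: each newer
 * family also inherits the flags of the more generic families that
 * follow it.
 */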
2433 switch (sc->bge_asicrev) {
2434 case BGE_ASICREV_BCM5700:
2435 case BGE_ASICREV_BCM5701:
2436 case BGE_ASICREV_BCM5703:
2437 case BGE_ASICREV_BCM5704:
2438 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2439 break;
2440 case BGE_ASICREV_BCM5714_A0:
2441 case BGE_ASICREV_BCM5780:
2442 case BGE_ASICREV_BCM5714:
2443 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2444 /* FALLTHRU */
2445 case BGE_ASICREV_BCM5750:
2446 case BGE_ASICREV_BCM5752:
2447 case BGE_ASICREV_BCM5755:
2448 case BGE_ASICREV_BCM5787:
2449 case BGE_ASICREV_BCM5906:
2450 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2451 /* FALLTHRU */
2452 case BGE_ASICREV_BCM5705:
2453 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2454 break;
2455 }
2456
2457 /* Set various bug flags. */
2458 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2459 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2460 sc->bge_flags |= BGE_FLAG_CRC_BUG;
2461 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2462 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2463 sc->bge_flags |= BGE_FLAG_ADC_BUG;
2464 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2465 sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
2466 if (BGE_IS_5705_PLUS(sc) &&
2467 !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
2468 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2469 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2470 if (sc->bge_chipid != BGE_CHIPID_BCM5722_A0)
2471 sc->bge_flags |= BGE_FLAG_JITTER_BUG;
2472 } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
2473 sc->bge_flags |= BGE_FLAG_BER_BUG;
2474 }
2475
2476
2477 /*
2478 * We could possibly check for BCOM_DEVICEID_BCM5788 in bge_probe()
2479 * but I do not know the DEVICEID for the 5788M.
2480 */
2481 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2482 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2483 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2484 sc->bge_flags |= BGE_FLAG_5788;
2485
2486 /*
2487 * Check if this is a PCI-X or PCI Express device.
2488 */
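/*
 * Note that the #if/#else arms below share their closing braces with
 * the common code after #endif; only one arm is ever compiled in.
 */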
2489#if __FreeBSD_version > 602101
2490 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2491 /*
2492 * Found a PCI Express capabilities register, this
2493 * must be a PCI Express device.
2494 */
2495 if (reg != 0) {
2496 sc->bge_flags |= BGE_FLAG_PCIE;
2497#else
2498 if (BGE_IS_5705_PLUS(sc)) {
2499 reg = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2500 if ((reg & 0xFF) == BGE_PCIE_CAPID) {
2501 sc->bge_flags |= BGE_FLAG_PCIE;
2502 reg = BGE_PCIE_CAPID;
2503#endif
2504 bge_set_max_readrq(sc, reg);
2505 }
2506 } else {
2507 /*
2508 * Check if the device is in PCI-X Mode.
2509 * (This bit is not valid on PCI Express controllers.)
2510 */
2511 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2512 BGE_PCISTATE_PCI_BUSMODE) == 0)
2513 sc->bge_flags |= BGE_FLAG_PCIX;
2514 }
2515
2516#if __FreeBSD_version > 602105
2517 {
2518 int msicount;
2519
2520 /*
2521 * Allocate the interrupt, using MSI if possible. These devices
2522 * support 8 MSI messages, but only the first one is used in
2523 * normal operation.
2524 */
2525 if (bge_can_use_msi(sc)) {
2526 msicount = pci_msi_count(dev);
2527 if (msicount > 1)
2528 msicount = 1;
2529 } else
2530 msicount = 0;
2531 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2532 rid = 1;
2533 sc->bge_flags |= BGE_FLAG_MSI;
2534 } else
2535 rid = 0;
2536 }
2537#else
2538 rid = 0;
2539#endif
2540
2541 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2542 RF_SHAREABLE | RF_ACTIVE);
2543
2544 if (sc->bge_irq == NULL) {
2545 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2546 error = ENXIO;
2547 goto fail;
2548 }
2549
2550 if (bootverbose)
2551 device_printf(dev,
2552 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2553 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2554 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2555 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2556
2557 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2558
2559 /* Try to reset the chip. */
2560 if (bge_reset(sc)) {
2561 device_printf(sc->bge_dev, "chip reset failed\n");
2562 error = ENXIO;
2563 goto fail;
2564 }
2565
2566 sc->bge_asf_mode = 0;
2567 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2568 == BGE_MAGIC_NUMBER)) {
2569 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2570 & BGE_HWCFG_ASF) {
2571 sc->bge_asf_mode |= ASF_ENABLE;
2572 sc->bge_asf_mode |= ASF_STACKUP;
2573 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2574 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2575 }
2576 }
2577 }
2578
2579 /* Try to reset the chip again the nice way. */
2580 bge_stop_fw(sc);
2581 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2582 if (bge_reset(sc)) {
2583 device_printf(sc->bge_dev, "chip reset failed\n");
2584 error = ENXIO;
2585 goto fail;
2586 }
2587
2588 bge_sig_legacy(sc, BGE_RESET_STOP);
2589 bge_sig_post_reset(sc, BGE_RESET_STOP);
2590
2591 if (bge_chipinit(sc)) {
2592 device_printf(sc->bge_dev, "chip initialization failed\n");
2593 error = ENXIO;
2594 goto fail;
2595 }
2596
2597 error = bge_get_eaddr(sc, eaddr);
2598 if (error) {
2599 device_printf(sc->bge_dev,
2600 "failed to read station address\n");
2601 error = ENXIO;
2602 goto fail;
2603 }
2604
2605 /* 5705 limits RX return ring to 512 entries. */
2606 if (BGE_IS_5705_PLUS(sc))
2607 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2608 else
2609 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2610
2611 if (bge_dma_alloc(dev)) {
2612 device_printf(sc->bge_dev,
2613 "failed to allocate DMA resources\n");
2614 error = ENXIO;
2615 goto fail;
2616 }
2617
2618 /* Set default tuneable values. */
2619 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2620 sc->bge_rx_coal_ticks = 150;
2621 sc->bge_tx_coal_ticks = 150;
2622 sc->bge_rx_max_coal_bds = 10;
2623 sc->bge_tx_max_coal_bds = 10;
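/*
 * The coalescing tick values are in microseconds; the max_coal_bds
 * values cap how many buffer descriptors may accumulate before the
 * chip forces a status update/interrupt.
 */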
2624
2625 /* Set up ifnet structure */
2626 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2627 if (ifp == NULL) {
2628 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2629 error = ENXIO;
2630 goto fail;
2631 }
2632 ifp->if_softc = sc;
2633 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2634 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2635 ifp->if_ioctl = bge_ioctl;
2636 ifp->if_start = bge_start;
2637 ifp->if_init = bge_init;
2638 ifp->if_mtu = ETHERMTU;
2639 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2640 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2641 IFQ_SET_READY(&ifp->if_snd);
2642 ifp->if_hwassist = BGE_CSUM_FEATURES;
2643 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2644 IFCAP_VLAN_MTU;
2645#ifdef IFCAP_VLAN_HWCSUM
2646 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2647#endif
2648 ifp->if_capenable = ifp->if_capabilities;
2649#ifdef DEVICE_POLLING
2650 ifp->if_capabilities |= IFCAP_POLLING;
2651#endif
2652
2653 /*
2654 * 5700 B0 chips do not support checksumming correctly due
2655 * to hardware bugs.
2656 */
2657 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2658 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2659 ifp->if_capenable &= ~IFCAP_HWCSUM;
2660 ifp->if_hwassist = 0;
2661 }
2662
2663 /*
2664 * Figure out what sort of media we have by checking the
2665 * hardware config word in the first 32k of NIC internal memory,
2666 * or fall back to examining the EEPROM if necessary.
2667 * Note: on some BCM5700 cards, this value appears to be unset.
2668 * If that's the case, we have to rely on identifying the NIC
2669 * by its PCI subsystem ID, as we do below for the SysKonnect
2670 * SK-9D41.
2671 */
2672 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2673 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2674 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
2675 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2676 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2677 sizeof(hwcfg))) {
2678 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2679 error = ENXIO;
2680 goto fail;
2681 }
2682 hwcfg = ntohl(hwcfg);
2683 }
2684
2685 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2686 sc->bge_flags |= BGE_FLAG_TBI;
2687
2688 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2689 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2690 sc->bge_flags |= BGE_FLAG_TBI;
2691
2692 if (sc->bge_flags & BGE_FLAG_TBI) {
2693 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2694 bge_ifmedia_sts);
2695 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
2696 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2697 0, NULL);
2698 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
2699 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2700 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2701 } else {
2702 /*
2703 * Do transceiver setup and tell the firmware the
2704 * driver is down so we can try to access the PHY during
2705 * the probe if ASF is running. Retry a couple of times
2706 * if we get a conflict with the ASF firmware accessing
2707 * the PHY.
2708 */
2709 trys = 0;
2710 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2711again:
2712 bge_asf_driver_up(sc);
2713
2714 if (mii_phy_probe(dev, &sc->bge_miibus,
2715 bge_ifmedia_upd, bge_ifmedia_sts)) {
2716 if (trys++ < 4) {
2717 device_printf(sc->bge_dev, "Try again\n");
2718 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
2719 BMCR_RESET);
2720 goto again;
2721 }
2722
2723 device_printf(sc->bge_dev, "MII without any PHY!\n");
2724 error = ENXIO;
2725 goto fail;
2726 }
2727
2728 /*
2729 * Now tell the firmware we are going up after probing the PHY
2730 */
2731 if (sc->bge_asf_mode & ASF_STACKUP)
2732 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2733 }
2734
2735 /*
2736 * When using the BCM5701 in PCI-X mode, data corruption has
2737 * been observed in the first few bytes of some received packets.
2738 * Aligning the packet buffer in memory eliminates the corruption.
2739 * Unfortunately, this misaligns the packet payloads. On platforms
2740 * which do not support unaligned accesses, we will realign the
2741 * payloads by copying the received packets.
2742 */
2743 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2744 sc->bge_flags & BGE_FLAG_PCIX)
2745 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2746
2747 /*
2748 * Call MI attach routine.
2749 */
2750 ether_ifattach(ifp, eaddr);
2751 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2752
2753 /*
2754 * Hookup IRQ last.
2755 */
2756#if __FreeBSD_version > 700030
2757 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2758 NULL, bge_intr, sc, &sc->bge_intrhand);
2759#else
2760 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2761 bge_intr, sc, &sc->bge_intrhand);
2762#endif
2763
2764 if (error) {
2765 bge_detach(dev);
2766 device_printf(sc->bge_dev, "couldn't set up irq\n");
2767 }
2768
2769 bge_add_sysctls(sc);
2770
2771 return (0);
2772
2773fail:
2774 bge_release_resources(sc);
2775
2776 return (error);
2777}
2778
2779static int
2780bge_detach(device_t dev)
2781{
2782 struct bge_softc *sc;
2783 struct ifnet *ifp;
2784
2785 sc = device_get_softc(dev);
2786 ifp = sc->bge_ifp;
2787
2788#ifdef DEVICE_POLLING
2789 if (ifp->if_capenable & IFCAP_POLLING)
2790 ether_poll_deregister(ifp);
2791#endif
2792
2793 BGE_LOCK(sc);
2794 bge_stop(sc);
2795 bge_reset(sc);
2796 BGE_UNLOCK(sc);
2797
2798 callout_drain(&sc->bge_stat_ch);
2799
2800 ether_ifdetach(ifp);
2801
2802 if (sc->bge_flags & BGE_FLAG_TBI) {
2803 ifmedia_removeall(&sc->bge_ifmedia);
2804 } else {
2805 bus_generic_detach(dev);
2806 device_delete_child(dev, sc->bge_miibus);
2807 }
2808
2809 bge_release_resources(sc);
2810
2811 return (0);
2812}
2813
2814static void
2815bge_release_resources(struct bge_softc *sc)
2816{
2817 device_t dev;
2818
2819 dev = sc->bge_dev;
2820
2821 if (sc->bge_intrhand != NULL)
2822 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2823
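/* The IRQ rid must mirror bge_attach(): 1 when MSI is in use, else 0. */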
2824 if (sc->bge_irq != NULL)
2825 bus_release_resource(dev, SYS_RES_IRQ,
2826 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
2827
2828#if __FreeBSD_version > 602105
2829 if (sc->bge_flags & BGE_FLAG_MSI)
2830 pci_release_msi(dev);
2831#endif
2832
2833 if (sc->bge_res != NULL)
2834 bus_release_resource(dev, SYS_RES_MEMORY,
2835 BGE_PCI_BAR0, sc->bge_res);
2836
2837 if (sc->bge_ifp != NULL)
2838 if_free(sc->bge_ifp);
2839
2840 bge_dma_free(sc);
2841
2842 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
2843 BGE_LOCK_DESTROY(sc);
2844}
2845
2846static int
2847bge_reset(struct bge_softc *sc)
2848{
2849 device_t dev;
2850 uint32_t cachesize, command, pcistate, reset, val;
2851 void (*write_op)(struct bge_softc *, int, int);
2852 int i;
2853
2854 dev = sc->bge_dev;
2855
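/*
 * Choose how BGE_MISC_CFG will be written for the reset: newer
 * chips take memory-window writes (direct ones on PCI Express),
 * everything else uses register-indirect access.
 */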
2856 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
2857 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2858 if (sc->bge_flags & BGE_FLAG_PCIE)
2859 write_op = bge_writemem_direct;
2860 else
2861 write_op = bge_writemem_ind;
2862 } else
2863 write_op = bge_writereg_ind;
2864
2865 /* Save some important PCI state. */
2866 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2867 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2868 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2869
2870 pci_write_config(dev, BGE_PCI_MISC_CTL,
2871 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
2872 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
2873
2874 /* Disable fastboot on controllers that support it. */
2875 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2876 sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2877 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2878 if (bootverbose)
2879 device_printf(sc->bge_dev, "Disabling fastboot\n");
2880 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2881 }
2882
2883 /*
2884 * Write the magic number to SRAM at offset 0xB50.
2885 * When firmware finishes its initialization it will
2886 * write ~BGE_MAGIC_NUMBER to the same location.
2887 */
2888 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2889
2890 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
2891
2892 /* XXX: Broadcom Linux driver. */
2893 if (sc->bge_flags & BGE_FLAG_PCIE) {
2894 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
2895 CSR_WRITE_4(sc, 0x7E2C, 0x20);
2896 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2897 /* Prevent PCIE link training during global reset */
2898 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
2899 reset |= 1 << 29;
2900 }
2901 }
2902
2903 /*
2904 * Set GPHY Power Down Override to leave GPHY
2905 * powered up in D0 uninitialized.
2906 */
2907 if (BGE_IS_5705_PLUS(sc))
2908 reset |= 0x04000000;
2909
2910 /* Issue global reset */
2911 write_op(sc, BGE_MISC_CFG, reset);
2912
2913 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2914 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2915 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2916 val | BGE_VCPU_STATUS_DRV_RESET);
2917 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2918 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2919 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2920 }
2921
2922 DELAY(1000);
2923
2924 /* XXX: Broadcom Linux driver. */
2925 if (sc->bge_flags & BGE_FLAG_PCIE) {
2926 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2927 DELAY(500000); /* wait for link training to complete */
2928 val = pci_read_config(dev, 0xC4, 4);
2929 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
2930 }
2931 /*
2932 * Set PCIE max payload size to 128 bytes and clear error
2933 * status.
2934 */
2935 pci_write_config(dev, 0xD8, 0xF5000, 4);
2936 }
2937
2938 /* Reset some of the PCI state that got zapped by reset. */
2939 pci_write_config(dev, BGE_PCI_MISC_CTL,
2940 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
2941 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
2942 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2943 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2944 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
2945
2946 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
2947 if (BGE_IS_5714_FAMILY(sc)) {
2948 /* This chip disables MSI on reset. */
2949 if (sc->bge_flags & BGE_FLAG_MSI) {
2950 val = pci_read_config(dev, BGE_PCI_MSI_CTL, 2);
2951 pci_write_config(dev, BGE_PCI_MSI_CTL,
2952 val | PCIM_MSICTRL_MSI_ENABLE, 2);
2953 val = CSR_READ_4(sc, BGE_MSI_MODE);
2954 CSR_WRITE_4(sc, BGE_MSI_MODE,
2955 val | BGE_MSIMODE_ENABLE);
2956 }
2957 val = CSR_READ_4(sc, BGE_MARB_MODE);
2958 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2959 } else
2960 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2961
2962 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2963 for (i = 0; i < BGE_TIMEOUT; i++) {
2964 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2965 if (val & BGE_VCPU_STATUS_INIT_DONE)
2966 break;
2967 DELAY(100);
2968 }
2969 if (i == BGE_TIMEOUT) {
2970 device_printf(sc->bge_dev, "reset timed out\n");
2971 return (1);
2972 }
2973 } else {
2974 /*
2975 * Poll until we see the 1's complement of the magic number.
2976 * This indicates that the firmware initialization is complete.
2977 * We expect this to fail if no chip containing the Ethernet
2978 * address is fitted, though.
2979 */
2980 for (i = 0; i < BGE_TIMEOUT; i++) {
2981 DELAY(10);
2982 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2983 if (val == ~BGE_MAGIC_NUMBER)
2984 break;
2985 }
2986
2987 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
2988 device_printf(sc->bge_dev, "firmware handshake timed out, "
2989 "found 0x%08x\n", val);
2990 }
2991
2992 /*
2993 * XXX Wait for the value of the PCISTATE register to
2994 * return to its original pre-reset state. This is a
2995 * fairly good indicator of reset completion. If we don't
2996 * wait for the reset to fully complete, trying to read
2997 * from the device's non-PCI registers may yield garbage
2998 * results.
2999 */
3000 for (i = 0; i < BGE_TIMEOUT; i++) {
3001 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3002 break;
3003 DELAY(10);
3004 }
3005
3006 if (sc->bge_flags & BGE_FLAG_PCIE) {
3007 reset = bge_readmem_ind(sc, 0x7C00);
3008 bge_writemem_ind(sc, 0x7C00, reset | (1 << 25));
3009 }
3010
3011 /* Fix up byte swapping. */
3012 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3013 BGE_MODECTL_BYTESWAP_DATA);
3014
3015 /* Tell the ASF firmware we are up */
3016 if (sc->bge_asf_mode & ASF_STACKUP)
3017 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3018
3019 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3020
3021 /*
3022 * The 5704 in TBI mode apparently needs some special
3023 * adjustment to ensure the SERDES drive level is set
3024 * to 1.2V.
3025 */
3026 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3027 sc->bge_flags & BGE_FLAG_TBI) {
3028 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3029 val = (val & ~0xFFF) | 0x880;
3030 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3031 }
3032
3033 /* XXX: Broadcom Linux driver. */
3034 if (sc->bge_flags & BGE_FLAG_PCIE &&
3035 sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3036 val = CSR_READ_4(sc, 0x7C00);
3037 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3038 }
3039 DELAY(10000);
3040
3041 return (0);
3042}
3043
3044/*
3045 * Frame reception handling. This is called if there's a frame
3046 * on the receive return list.
3047 *
3048 * Note: we have to be able to handle two possibilities here:
3049 * 1) the frame is from the jumbo receive ring
3050 * 2) the frame is from the standard receive ring
3051 */
3052
3053static void
3054bge_rxeof(struct bge_softc *sc)
3055{
3056 struct ifnet *ifp;
3057 int stdcnt = 0, jumbocnt = 0;
3058
3059 BGE_LOCK_ASSERT(sc);
3060
3061 /* Nothing to do. */
3062 if (sc->bge_rx_saved_considx ==
3063 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
3064 return;
3065
3066 ifp = sc->bge_ifp;
3067
3068 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3069 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3070 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3071 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
3072 if (BGE_IS_JUMBO_CAPABLE(sc))
3073 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3074 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);
3075
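/*
 * Walk the return ring until our saved consumer index catches up
 * with the producer index the chip last posted in the status block.
 */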
3076 while (sc->bge_rx_saved_considx !=
3077 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
3078 struct bge_rx_bd *cur_rx;
3079 uint32_t rxidx;
3080 struct mbuf *m = NULL;
3081 uint16_t vlan_tag = 0;
3082 int have_tag = 0;
3083
3084#ifdef DEVICE_POLLING
3085 if (ifp->if_capenable & IFCAP_POLLING) {
3086 if (sc->rxcycles <= 0)
3087 break;
3088 sc->rxcycles--;
3089 }
3090#endif
3091
3092 cur_rx =
3093 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
3094
3095 rxidx = cur_rx->bge_idx;
3096 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
3097
3098 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3099 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3100 have_tag = 1;
3101 vlan_tag = cur_rx->bge_vlan_tag;
3102 }
3103
3104 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3105 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3106 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
3107 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
3108 BUS_DMASYNC_POSTREAD);
3109 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
3110 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
3111 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3112 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
3113 jumbocnt++;
3114 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3115 ifp->if_ierrors++;
3116 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
3117 continue;
3118 }
3119 if (bge_newbuf_jumbo(sc,
3120 sc->bge_jumbo, NULL) == ENOBUFS) {
3121 ifp->if_ierrors++;
3122 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
3123 continue;
3124 }
3125 } else {
3126 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3127 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
3128 sc->bge_cdata.bge_rx_std_dmamap[rxidx],
3129 BUS_DMASYNC_POSTREAD);
3130 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
3131 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
3132 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3133 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
3134 stdcnt++;
3135 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3136 ifp->if_ierrors++;
3137 bge_newbuf_std(sc, sc->bge_std, m);
3138 continue;
3139 }
3140 if (bge_newbuf_std(sc, sc->bge_std,
3141 NULL) == ENOBUFS) {
3142 ifp->if_ierrors++;
3143 bge_newbuf_std(sc, sc->bge_std, m);
3144 continue;
3145 }
3146 }
3147
3148 ifp->if_ipackets++;
3149#ifndef __NO_STRICT_ALIGNMENT
3150 /*
3151 * For architectures with strict alignment we must make sure
3152 * the payload is aligned.
3153 */
3154 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3155 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3156 cur_rx->bge_len);
3157 m->m_data += ETHER_ALIGN;
3158 }
3159#endif
3160 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3161 m->m_pkthdr.rcvif = ifp;
3162
3163 if (ifp->if_capenable & IFCAP_RXCSUM) {
3164 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3165 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3166 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3167 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3168 }
3169 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3170 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3171 m->m_pkthdr.csum_data =
3172 cur_rx->bge_tcp_udp_csum;
3173 m->m_pkthdr.csum_flags |=
3174 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
3175 }
3176 }
3177
3178 /*
3179 * If we received a packet with a vlan tag,
3180 * attach that information to the packet.
3181 */
3182 if (have_tag) {
3183#if __FreeBSD_version > 700022
3184 m->m_pkthdr.ether_vtag = vlan_tag;
3185 m->m_flags |= M_VLANTAG;
3186#else
3187 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3188 if (m == NULL)
3189 continue;
3190#endif
3191 }
3192
3193 BGE_UNLOCK(sc);
3194 (*ifp->if_input)(ifp, m);
3195 BGE_LOCK(sc);
3196 }
3197
3198 if (stdcnt > 0)
3199 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3200 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3201
3202 if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
3203 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3204 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3205
3206 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3207 if (stdcnt)
3208 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
3209 if (jumbocnt)
3210 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
3211#ifdef notyet
3212 /*
3213 * This register wraps very quickly under heavy packet drops.
3214 * If you need correct statistics, you can enable this check.
3215 */
3216 if (BGE_IS_5705_PLUS(sc))
3217 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3218#endif
3219}
3220
3221static void
3222bge_txeof(struct bge_softc *sc)
3223{
3224 struct bge_tx_bd *cur_tx = NULL;
3225 struct ifnet *ifp;
3226
3227 BGE_LOCK_ASSERT(sc);
3228
3229 /* Nothing to do. */
3230 if (sc->bge_tx_saved_considx ==
3231 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
3232 return;
3233
3234 ifp = sc->bge_ifp;
3235
3236 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3237 sc->bge_cdata.bge_tx_ring_map,
3238 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3239 /*
3240 * Go through our tx ring and free mbufs for those
3241 * frames that have been sent.
3242 */
3243 while (sc->bge_tx_saved_considx !=
3244 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
3245 uint32_t idx = 0;
3246
3247 idx = sc->bge_tx_saved_considx;
3248 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3249 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3250 ifp->if_opackets++;
3251 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3252 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
3253 sc->bge_cdata.bge_tx_dmamap[idx],
3254 BUS_DMASYNC_POSTWRITE);
3255 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
3256 sc->bge_cdata.bge_tx_dmamap[idx]);
3257 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3258 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3259 }
3260 sc->bge_txcnt--;
3261 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3262 }
3263
3264 if (cur_tx != NULL)
3265 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3266 if (sc->bge_txcnt == 0)
3267 sc->bge_timer = 0;
3268}
3269
3270#ifdef DEVICE_POLLING
3271static void
3272bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3273{
3274 struct bge_softc *sc = ifp->if_softc;
3275 uint32_t statusword;
3276
3277 BGE_LOCK(sc);
3278 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3279 BGE_UNLOCK(sc);
3280 return;
3281 }
3282
3283 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3284 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
3285
3286 statusword = atomic_readandclear_32(
3287 &sc->bge_ldata.bge_status_block->bge_status);
3288
3289 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3290 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3291
3292 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3293 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3294 sc->bge_link_evt++;
3295
3296 if (cmd == POLL_AND_CHECK_STATUS)
3297 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3298 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3299 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3300 bge_link_upd(sc);
3301
3302 sc->rxcycles = count;
3303 bge_rxeof(sc);
3304 bge_txeof(sc);
3305 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3306 bge_start_locked(ifp);
3307
3308 BGE_UNLOCK(sc);
3309}
3310#endif /* DEVICE_POLLING */
3311
3312static void
3313bge_intr(void *xsc)
3314{
3315 struct bge_softc *sc;
3316 struct ifnet *ifp;
3317 uint32_t statusword;
3318
3319 sc = xsc;
3320
3321 BGE_LOCK(sc);
3322
3323 ifp = sc->bge_ifp;
3324
3325#ifdef DEVICE_POLLING
3326 if (ifp->if_capenable & IFCAP_POLLING) {
3327 BGE_UNLOCK(sc);
3328 return;
3329 }
3330#endif
3331
3332 /*
3333 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3334 * disable interrupts by writing nonzero like we used to, since with
3335 * our current organization this just gives complications and
3336 * pessimizations for re-enabling interrupts. We used to have races
3337 * instead of the necessary complications. Disabling interrupts
3338 * would just reduce the chance of a status update while we are
3339 * running (by switching to the interrupt-mode coalescence
3340 * parameters), but this chance is already very low so it is more
3341 * efficient to get another interrupt than prevent it.
3342 *
3343 * We do the ack first to ensure another interrupt if there is a
3344 * status update after the ack. We don't check for the status
3345 * changing later because it is more efficient to get another
3346 * interrupt than prevent it, not quite as above (not checking is
3347 * a smaller optimization than not toggling the interrupt enable,
3348 * since checking doesn't involve PCI accesses and toggling requires
3349 * the status check). So toggling would probably be a pessimization
3350 * even with MSI. It would only be needed for using a task queue.
3351 */
3352 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3353
3354 /*
3355 * Do the mandatory PCI flush as well as get the link status.
3356 */
3357 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3358
3359 /* Make sure the descriptor ring indexes are coherent. */
3360 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3361 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
3362 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3363 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3364
3365 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3366 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3367 statusword || sc->bge_link_evt)
3368 bge_link_upd(sc);
3369
3370 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3371 /* Check RX return ring producer/consumer. */
3372 bge_rxeof(sc);
3373
3374 /* Check TX ring producer/consumer. */
3375 bge_txeof(sc);
3376 }
3377
3378 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3379 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3380 bge_start_locked(ifp);
3381
3382 BGE_UNLOCK(sc);
3383}
3384
3385static void
3386bge_asf_driver_up(struct bge_softc *sc)
3387{
3388 if (sc->bge_asf_mode & ASF_STACKUP) {
3389 /* Send ASF heartbeat approx. every 2s */
3390 if (sc->bge_asf_count)
3391 sc->bge_asf_count--;
3392 else {
3393 sc->bge_asf_count = 5;
3394 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3395 BGE_FW_DRV_ALIVE);
3396 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3397 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3398 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3399 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3400 }
3401 }
3402}
3403
3404static void
3405bge_tick(void *xsc)
3406{
3407 struct bge_softc *sc = xsc;
3408 struct mii_data *mii = NULL;
3409
3410 BGE_LOCK_ASSERT(sc);
3411
3412 /* Synchronize with possible callout reset/stop. */
3413 if (callout_pending(&sc->bge_stat_ch) ||
3414 !callout_active(&sc->bge_stat_ch))
3415 return;
3416
3417 if (BGE_IS_5705_PLUS(sc))
3418 bge_stats_update_regs(sc);
3419 else
3420 bge_stats_update(sc);
3421
3422 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3423 mii = device_get_softc(sc->bge_miibus);
3424 /*
3425 * Do not touch PHY if we have link up. This could break
3426 * IPMI/ASF mode or produce extra input errors
3427 * (extra errors were reported for bcm5701 & bcm5704).
3428 */
3429 if (!sc->bge_link)
3430 mii_tick(mii);
3431 } else {
3432 /*
3433 * Since auto-polling can't be used in TBI mode, we poll the
3434 * link status manually. Here we register a pending link event
3435 * and trigger an interrupt.
3436 */
3437#ifdef DEVICE_POLLING
3438 /* In polling mode we poll link state in bge_poll(). */
3439 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3440#endif
3441 {
3442 sc->bge_link_evt++;
3443 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3444 sc->bge_flags & BGE_FLAG_5788)
3445 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3446 else
3447 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3448 }
3449 }
3450
3451 bge_asf_driver_up(sc);
3452 bge_watchdog(sc);
3453
3454 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3455}
3456
3457static void
3458bge_stats_update_regs(struct bge_softc *sc)
3459{
3460 struct ifnet *ifp;
3461
3462 ifp = sc->bge_ifp;
3463
3464 ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3465 offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3466
3467 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3468}
3469
3470static void
3471bge_stats_update(struct bge_softc *sc)
3472{
3473 struct ifnet *ifp;
3474 bus_size_t stats;
3475 uint32_t cnt; /* current register value */
3476
3477 ifp = sc->bge_ifp;
3478
3479 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3480
3481#define READ_STAT(sc, stats, stat) \
3482 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3483
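/*
 * The chip maintains 64-bit counters in NIC memory; we read the low
 * 32 bits through the memory window and accumulate the difference
 * since the last poll, so 32-bit wraps are absorbed by the modular
 * subtraction below.
 */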
3484 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3485 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
3486 sc->bge_tx_collisions = cnt;
3487
3488 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3489 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
3490 sc->bge_rx_discards = cnt;
3491
3492 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3493 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
3494 sc->bge_tx_discards = cnt;
3495
3496#undef READ_STAT
3497}
3498
3499/*
3500 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3501 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3502 * but when such padded frames employ the bge IP/TCP checksum offload,
3503 * the hardware checksum assist gives incorrect results (possibly
3504 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3505 * If we pad such runts with zeros, the onboard checksum comes out correct.
3506 */
3507static __inline int
3508bge_cksum_pad(struct mbuf *m)
3509{
3510 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3511 struct mbuf *last;
3512
3513 /* If there's only the packet-header and we can pad there, use it. */
3514 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
3515 M_TRAILINGSPACE(m) >= padlen) {
3516 last = m;
3517 } else {
3518 /*
3519 * Walk packet chain to find last mbuf. We will either
3520 * pad there, or append a new mbuf and pad it.
3521 */
3522 for (last = m; last->m_next != NULL; last = last->m_next);
3523 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
3524 /* Allocate new empty mbuf, pad it. Compact later. */
3525 struct mbuf *n;
3526
3527 MGET(n, M_DONTWAIT, MT_DATA);
3528 if (n == NULL)
3529 return (ENOBUFS);
3530 n->m_len = 0;
3531 last->m_next = n;
3532 last = n;
3533 }
3534 }
3535
3536 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
3537 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3538 last->m_len += padlen;
3539 m->m_pkthdr.len += padlen;
3540
3541 return (0);
3542}
3543
3544/*
3545 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3546 * pointers to descriptors.
3547 */
3548static int
3549bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
3550{
3551 bus_dma_segment_t segs[BGE_NSEG_NEW];
3552 bus_dmamap_t map;
3553 struct bge_tx_bd *d;
3554 struct mbuf *m = *m_head;
3555 uint32_t idx = *txidx;
3556 uint16_t csum_flags;
3557 int nsegs, i, error;
3558
3559 csum_flags = 0;
3560 if (m->m_pkthdr.csum_flags) {
3561 if (m->m_pkthdr.csum_flags & CSUM_IP)
3562 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3563 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
3564 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3565 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
3566 (error = bge_cksum_pad(m)) != 0) {
3567 m_freem(m);
3568 *m_head = NULL;
3569 return (error);
3570 }
3571 }
3572 if (m->m_flags & M_LASTFRAG)
3573 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3574 else if (m->m_flags & M_FRAG)
3575 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3576 }
3577
3578 map = sc->bge_cdata.bge_tx_dmamap[idx];
3579 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs,
3580 &nsegs, BUS_DMA_NOWAIT);
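/*
 * EFBIG means the mbuf chain has more segments than BGE_NSEG_NEW;
 * collapse it into fewer mbufs and retry the load once.
 */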
3581 if (error == EFBIG) {
3582 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
3583 if (m == NULL) {
3584 m_freem(*m_head);
3585 *m_head = NULL;
3586 return (ENOBUFS);
3587 }
3588 *m_head = m;
3589 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m,
3590 segs, &nsegs, BUS_DMA_NOWAIT);
3591 if (error) {
3592 m_freem(m);
3593 *m_head = NULL;
3594 return (error);
3595 }
3596 } else if (error != 0)
3597 return (error);
3598
3599 /*
3600 * Sanity check: avoid coming within 16 descriptors
3601 * of the end of the ring.
3602 */
3603 if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3604 bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
3605 return (ENOBUFS);
3606 }
3607
3608 bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
3609
3610 for (i = 0; ; i++) {
3611 d = &sc->bge_ldata.bge_tx_ring[idx];
3612 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3613 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3614 d->bge_len = segs[i].ds_len;
3615 d->bge_flags = csum_flags;
3616 if (i == nsegs - 1)
3617 break;
3618 BGE_INC(idx, BGE_TX_RING_CNT);
3619 }
3620
3621 /* Mark the last segment as end of packet... */
3622 d->bge_flags |= BGE_TXBDFLAG_END;
3623
3624 /* ... and put VLAN tag into first segment. */
3625 d = &sc->bge_ldata.bge_tx_ring[*txidx];
3626#if __FreeBSD_version > 700022
3627 if (m->m_flags & M_VLANTAG) {
3628 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3629 d->bge_vlan_tag = m->m_pkthdr.ether_vtag;
3630 } else
3631 d->bge_vlan_tag = 0;
3632#else
3633 {
3634 struct m_tag *mtag;
3635
3636 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
3637 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3638 d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
3639 } else
3640 d->bge_vlan_tag = 0;
3641 }
3642#endif
3643
3644 /*
3645 * Ensure that the map for this transmission
3646 * is placed at the array index of the last descriptor
3647 * in this chain.
3648 */
3649 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3650 sc->bge_cdata.bge_tx_dmamap[idx] = map;
3651 sc->bge_cdata.bge_tx_chain[idx] = m;
3652 sc->bge_txcnt += nsegs;
3653
3654 BGE_INC(idx, BGE_TX_RING_CNT);
3655 *txidx = idx;
3656
3657 return (0);
3658}
3659
3660/*
3661 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3662 * to the mbuf data regions directly in the transmit descriptors.
3663 */
3664static void
3665bge_start_locked(struct ifnet *ifp)
3666{
3667 struct bge_softc *sc;
3668 struct mbuf *m_head = NULL;
3669 uint32_t prodidx;
3670 int count = 0;
3671
3672 sc = ifp->if_softc;
3673
3674 if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3675 return;
3676
3677 prodidx = sc->bge_tx_prodidx;
3678
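/*
 * Queue frames only while the chain slot at prodidx is free; a
 * non-NULL pointer there means that descriptor still belongs to an
 * unreclaimed transmission and the ring is effectively full.
 */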
3679 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3680 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3681 if (m_head == NULL)
3682 break;
3683
3684 /*
3685 * XXX
3686 * The code inside the if() block is never reached since we
3687 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3688 * requests to checksum TCP/UDP in a fragmented packet.
3689 *
3690 * XXX
3691 * safety overkill. If this is a fragmented packet chain
3692 * with delayed TCP/UDP checksums, then only encapsulate
3693 * it if we have enough descriptors to handle the entire
3694 * chain at once.
3695 * (paranoia -- may not actually be needed)
3696 */
3697 if (m_head->m_flags & M_FIRSTFRAG &&
3698 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3699 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3700 m_head->m_pkthdr.csum_data + 16) {
3701 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3702 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3703 break;
3704 }
3705 }
3706
3707 /*
3708 * Pack the data into the transmit ring. If we
3709 * don't have room, set the OACTIVE flag and wait
3710 * for the NIC to drain the ring.
3711 */
3712 if (bge_encap(sc, &m_head, &prodidx)) {
3713 if (m_head == NULL)
3714 break;
3715 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3716 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3717 break;
3718 }
3719 ++count;
3720
3721 /*
3722 * If there's a BPF listener, bounce a copy of this frame
3723 * to him.
3724 */
3725#ifdef ETHER_BPF_MTAP
3726 ETHER_BPF_MTAP(ifp, m_head);
3727#else
3728 BPF_MTAP(ifp, m_head);
3729#endif
3730 }
3731
3732 if (count == 0)
3733 /* No packets were dequeued. */
3734 return;
3735
3736 /* Transmit. */
3737 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3738 /* 5700 b2 errata */
3739 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3740 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3741
3742 sc->bge_tx_prodidx = prodidx;
3743
3744 /*
3745 * Set a timeout in case the chip goes out to lunch.
3746 */
3747 sc->bge_timer = 5;
3748}
3749
3750/*
3751 * Unlocked entry point for transmission: acquire the driver lock
3752 * and hand off to bge_start_locked() to do the real work.
3753 */
3754static void
3755bge_start(struct ifnet *ifp)
3756{
3757 struct bge_softc *sc;
3758
3759 sc = ifp->if_softc;
3760 BGE_LOCK(sc);
3761 bge_start_locked(ifp);
3762 BGE_UNLOCK(sc);
3763}
3764
3765static void
3766bge_init_locked(struct bge_softc *sc)
3767{
3768 struct ifnet *ifp;
3769 uint16_t *m;
3770
3771 BGE_LOCK_ASSERT(sc);
3772
3773 ifp = sc->bge_ifp;
3774
3775 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3776 return;
3777
3778 /* Cancel pending I/O and flush buffers. */
3779 bge_stop(sc);
3780
3781 bge_stop_fw(sc);
3782 bge_sig_pre_reset(sc, BGE_RESET_START);
3783 bge_reset(sc);
3784 bge_sig_legacy(sc, BGE_RESET_START);
3785 bge_sig_post_reset(sc, BGE_RESET_START);
3786
3787 bge_chipinit(sc);
3788
3789 /*
3790 * Init the various state machines, ring
3791 * control blocks and firmware.
3792 */
3793 if (bge_blockinit(sc)) {
3794 device_printf(sc->bge_dev, "initialization failure\n");
3795 return;
3796 }
3797
3798 ifp = sc->bge_ifp;
3799
3800 /* Specify MTU. */
3801 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3802 ETHER_HDR_LEN + ETHER_CRC_LEN +
3803 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
3804
3805 /* Load our MAC address. */
3806 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
3807 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3808 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3809
3810 /* Program promiscuous mode. */
3811 bge_setpromisc(sc);
3812
3813 /* Program multicast filter. */
3814 bge_setmulti(sc);
3815
3816 /* Program VLAN tag stripping. */
3817 bge_setvlan(sc);
3818
3819 /* Init RX ring. */
3820 bge_init_rx_ring_std(sc);
3821
3822 /*
3823 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3824 * memory to ensure that the chip has in fact read the first
3825 * entry of the ring.
3826 */
3827 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3828 uint32_t v, i;
3829 for (i = 0; i < 10; i++) {
3830 DELAY(20);
3831 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3832 if (v == (MCLBYTES - ETHER_ALIGN))
3833 break;
3834 }
3835 if (i == 10)
3836 device_printf(sc->bge_dev,
3837 "5705 A0 chip failed to load RX ring\n");
3838 }
3839
3840 /* Init jumbo RX ring. */
3841 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3842 bge_init_rx_ring_jumbo(sc);
3843
3844 /* Init our RX return ring index. */
3845 sc->bge_rx_saved_considx = 0;
3846
3847 /* Init our RX/TX stat counters. */
3848 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
3849
3850 /* Init TX ring. */
3851 bge_init_tx_ring(sc);
3852
3853 /* Turn on transmitter. */
3854 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3855
3856 /* Turn on receiver. */
3857 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3858
3859 /* Tell firmware we're alive. */
3860 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3861
3862#ifdef DEVICE_POLLING
3863 /* Disable interrupts if we are polling. */
3864 if (ifp->if_capenable & IFCAP_POLLING) {
3865 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3866 BGE_PCIMISCCTL_MASK_PCI_INTR);
3867 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3868 } else
3869#endif
3870
3871 /* Enable host interrupts. */
3872 {
3873 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3874 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3875 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3876 }
3877
3878 bge_ifmedia_upd_locked(ifp);
3879
3880 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3881 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3882
3883 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3884}
3885
3886static void
3887bge_init(void *xsc)
3888{
3889 struct bge_softc *sc = xsc;
3890
3891 BGE_LOCK(sc);
3892 bge_init_locked(sc);
3893 BGE_UNLOCK(sc);
3894}
3895
3896/*
3897 * Set media options.
3898 */
3899static int
3900bge_ifmedia_upd(struct ifnet *ifp)
3901{
3902 struct bge_softc *sc = ifp->if_softc;
3903 int res;
3904
3905 BGE_LOCK(sc);
3906 res = bge_ifmedia_upd_locked(ifp);
3907 BGE_UNLOCK(sc);
3908
3909 return (res);
3910}
3911
3912static int
3913bge_ifmedia_upd_locked(struct ifnet *ifp)
3914{
3915 struct bge_softc *sc = ifp->if_softc;
3916 struct mii_data *mii;
3917 struct mii_softc *miisc;
3918 struct ifmedia *ifm;
3919
3920 BGE_LOCK_ASSERT(sc);
3921
3922 ifm = &sc->bge_ifmedia;
3923
3924 /* If this is a 1000baseX NIC, enable the TBI port. */
3925 if (sc->bge_flags & BGE_FLAG_TBI) {
3926 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3927 return (EINVAL);
3928 switch (IFM_SUBTYPE(ifm->ifm_media)) {
3929 case IFM_AUTO:
3930 /*
3931 * The BCM5704 ASIC appears to have a special
3932 * mechanism for programming the autoneg
3933 * advertisement registers in TBI mode.
3934 */
3935 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3936 uint32_t sgdig;
3937 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
3938 if (sgdig & BGE_SGDIGSTS_DONE) {
3939 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3940 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3941 sgdig |= BGE_SGDIGCFG_AUTO |
3942 BGE_SGDIGCFG_PAUSE_CAP |
3943 BGE_SGDIGCFG_ASYM_PAUSE;
3944 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3945 sgdig | BGE_SGDIGCFG_SEND);
3946 DELAY(5);
3947 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3948 }
3949 }
3950 break;
3951 case IFM_1000_SX:
3952 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3953 BGE_CLRBIT(sc, BGE_MAC_MODE,
3954 BGE_MACMODE_HALF_DUPLEX);
3955 } else {
3956 BGE_SETBIT(sc, BGE_MAC_MODE,
3957 BGE_MACMODE_HALF_DUPLEX);
3958 }
3959 break;
3960 default:
3961 return (EINVAL);
3962 }
3963 return (0);
3964 }
3965
3966 sc->bge_link_evt++;
3967 mii = device_get_softc(sc->bge_miibus);
3968 if (mii->mii_instance)
3969 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3970 mii_phy_reset(miisc);
3971 mii_mediachg(mii);
3972
3973 /*
3974 * Force an interrupt so that we will call bge_link_upd
3975 * if needed and clear any pending link state attention.
3976 * Without this we get no further interrupts for link state
3977 * changes, and thus never bring the link UP, so we are
3978 * unable to send in bge_start_locked. The only other
3979 * way to get things working was to receive a packet and
3980 * get an RX intr.
3981 * bge_tick should help for fiber cards, and we might not
3982 * need to do this here if BGE_FLAG_TBI is set, but as
3983 * we poll for fiber anyway it should not harm.
3984 */
3985 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3986 sc->bge_flags & BGE_FLAG_5788)
3987 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3988 else
3989 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3990
3991 return (0);
3992}
3993
3994/*
3995 * Report current media status.
3996 */
3997static void
3998bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3999{
4000 struct bge_softc *sc = ifp->if_softc;
4001 struct mii_data *mii;
4002
4003 BGE_LOCK(sc);
4004
4005 if (sc->bge_flags & BGE_FLAG_TBI) {
4006 ifmr->ifm_status = IFM_AVALID;
4007 ifmr->ifm_active = IFM_ETHER;
4008 if (CSR_READ_4(sc, BGE_MAC_STS) &
4009 BGE_MACSTAT_TBI_PCS_SYNCHED)
4010 ifmr->ifm_status |= IFM_ACTIVE;
4011 else {
4012 ifmr->ifm_active |= IFM_NONE;
4013 BGE_UNLOCK(sc);
4014 return;
4015 }
4016 ifmr->ifm_active |= IFM_1000_SX;
4017 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4018 ifmr->ifm_active |= IFM_HDX;
4019 else
4020 ifmr->ifm_active |= IFM_FDX;
4021 BGE_UNLOCK(sc);
4022 return;
4023 }
4024
4025 mii = device_get_softc(sc->bge_miibus);
4026 mii_pollstat(mii);
4027 ifmr->ifm_active = mii->mii_media_active;
4028 ifmr->ifm_status = mii->mii_media_status;
4029
4030 BGE_UNLOCK(sc);
4031}
4032
4033static int
4034bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4035{
4036 struct bge_softc *sc = ifp->if_softc;
4037 struct ifreq *ifr = (struct ifreq *) data;
4038 struct mii_data *mii;
4039 int flags, mask, error = 0;
4040
4041 switch (command) {
4042 case SIOCSIFMTU:
4043 if (ifr->ifr_mtu < ETHERMIN ||
4044 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4045 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4046 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4047 ifr->ifr_mtu > ETHERMTU))
4048 error = EINVAL;
4049 else if (ifp->if_mtu != ifr->ifr_mtu) {
4050 ifp->if_mtu = ifr->ifr_mtu;
4051 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4052 bge_init(sc);
4053 }
4054 break;
4055 case SIOCSIFFLAGS:
4056 BGE_LOCK(sc);
4057 if (ifp->if_flags & IFF_UP) {
4058 /*
4059 * If only the state of the PROMISC flag changed,
4060 * then just use the 'set promisc mode' command
4061 * instead of reinitializing the entire NIC. Doing
4062 * a full re-init means reloading the firmware and
4063 * waiting for it to start up, which may take a
4064 * second or two. Similarly for ALLMULTI.
4065 */
4066 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4067 flags = ifp->if_flags ^ sc->bge_if_flags;
4068 if (flags & IFF_PROMISC)
4069 bge_setpromisc(sc);
4070 if (flags & IFF_ALLMULTI)
4071 bge_setmulti(sc);
4072 } else
4073 bge_init_locked(sc);
4074 } else {
4075 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4076 bge_stop(sc);
4077 }
4078 }
4079 sc->bge_if_flags = ifp->if_flags;
4080 BGE_UNLOCK(sc);
4081 error = 0;
4082 break;
4083 case SIOCADDMULTI:
4084 case SIOCDELMULTI:
4085 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4086 BGE_LOCK(sc);
4087 bge_setmulti(sc);
4088 BGE_UNLOCK(sc);
4089 error = 0;
4090 }
4091 break;
4092 case SIOCSIFMEDIA:
4093 case SIOCGIFMEDIA:
4094 if (sc->bge_flags & BGE_FLAG_TBI) {
4095 error = ifmedia_ioctl(ifp, ifr,
4096 &sc->bge_ifmedia, command);
4097 } else {
4098 mii = device_get_softc(sc->bge_miibus);
4099 error = ifmedia_ioctl(ifp, ifr,
4100 &mii->mii_media, command);
4101 }
4102 break;
4103 case SIOCSIFCAP:
4104 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4105#ifdef DEVICE_POLLING
4106 if (mask & IFCAP_POLLING) {
4107 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4108 error = ether_poll_register(bge_poll, ifp);
4109 if (error)
4110 return (error);
4111 BGE_LOCK(sc);
4112 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4113 BGE_PCIMISCCTL_MASK_PCI_INTR);
4114 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4115 ifp->if_capenable |= IFCAP_POLLING;
4116 BGE_UNLOCK(sc);
4117 } else {
4118 error = ether_poll_deregister(ifp);
4119 /* Enable interrupt even in error case */
4120 BGE_LOCK(sc);
4121 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
4122 BGE_PCIMISCCTL_MASK_PCI_INTR);
4123 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4124 ifp->if_capenable &= ~IFCAP_POLLING;
4125 BGE_UNLOCK(sc);
4126 }
4127 }
4128#endif
4129 if (mask & IFCAP_HWCSUM) {
4130 ifp->if_capenable ^= IFCAP_HWCSUM;
4131 if (IFCAP_HWCSUM & ifp->if_capenable &&
4132 IFCAP_HWCSUM & ifp->if_capabilities)
4133 ifp->if_hwassist = BGE_CSUM_FEATURES;
4134 else
4135 ifp->if_hwassist = 0;
4136#ifdef VLAN_CAPABILITIES
4137 VLAN_CAPABILITIES(ifp);
4138#endif
4139 }
4140
4141 if (mask & IFCAP_VLAN_MTU) {
4142 ifp->if_capenable ^= IFCAP_VLAN_MTU;
4143 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4144 bge_init(sc);
4145 }
4146
4147 if (mask & IFCAP_VLAN_HWTAGGING) {
4148 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4149 BGE_LOCK(sc);
4150 bge_setvlan(sc);
4151 BGE_UNLOCK(sc);
4152#ifdef VLAN_CAPABILITIES
4153 VLAN_CAPABILITIES(ifp);
4154#endif
4155 }
4156
4157 break;
4158 default:
4159 error = ether_ioctl(ifp, command, data);
4160 break;
4161 }
4162
4163 return (error);
4164}
4165
4166static void
4167bge_watchdog(struct bge_softc *sc)
4168{
4169 struct ifnet *ifp;
4170
4171 BGE_LOCK_ASSERT(sc);
4172
4173 if (sc->bge_timer == 0 || --sc->bge_timer)
4174 return;
4175
4176 ifp = sc->bge_ifp;
4177
4178 if_printf(ifp, "watchdog timeout -- resetting\n");
4179
4180 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4181 bge_init_locked(sc);
4182
4183 ifp->if_oerrors++;
4184}
4185
4186/*
4187 * Stop the adapter and free any mbufs allocated to the
4188 * RX and TX lists.
4189 */
4190static void
4191bge_stop(struct bge_softc *sc)
4192{
4193 struct ifnet *ifp;
4194 struct ifmedia_entry *ifm;
4195 struct mii_data *mii = NULL;
4196 int mtmp, itmp;
4197
4198 BGE_LOCK_ASSERT(sc);
4199
4200 ifp = sc->bge_ifp;
4201
4202 if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
4203 mii = device_get_softc(sc->bge_miibus);
4204
4205 callout_stop(&sc->bge_stat_ch);
4206
4207 /*
4208 * Disable all of the receiver blocks.
4209 */
4210 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4211 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4212 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4213 if (!(BGE_IS_5705_PLUS(sc)))
4214 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4215 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4216 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4217 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4218
4219 /*
4220 * Disable all of the transmit blocks.
4221 */
4222 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4223 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4224 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4225 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4226 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4227 if (!(BGE_IS_5705_PLUS(sc)))
4228 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4229 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4230
4231 /*
4232 * Shut down all of the memory managers and related
4233 * state machines.
4234 */
4235 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4236 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4237 if (!(BGE_IS_5705_PLUS(sc)))
4238 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4239 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4240 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4241 if (!(BGE_IS_5705_PLUS(sc))) {
4242 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4243 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4244 }
4245
4246 /* Disable host interrupts. */
4247 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4248 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4249
4250 /*
4251 * Tell firmware we're shutting down.
4252 */
4253
4254 bge_stop_fw(sc);
4255 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4256 bge_reset(sc);
4257 bge_sig_legacy(sc, BGE_RESET_STOP);
4258 bge_sig_post_reset(sc, BGE_RESET_STOP);
4259
4260 /*
4261 * Keep the ASF firmware running if up.
4262 */
4263 if (sc->bge_asf_mode & ASF_STACKUP)
4264 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4265 else
4266 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4267
4268 /* Free the RX lists. */
4269 bge_free_rx_ring_std(sc);
4270
4271 /* Free jumbo RX list. */
4272 if (BGE_IS_JUMBO_CAPABLE(sc))
4273 bge_free_rx_ring_jumbo(sc);
4274
4275 /* Free TX buffers. */
4276 bge_free_tx_ring(sc);
4277
4278 /*
4279 * Isolate/power down the PHY, but leave the media selection
4280 * unchanged so that things will be put back to normal when
4281 * we bring the interface back up.
4282 */
4283 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4284 itmp = ifp->if_flags;
4285 ifp->if_flags |= IFF_UP;
4286 /*
4287 * If we are called from bge_detach(), mii is already NULL.
4288 */
4289 if (mii != NULL) {
4290 ifm = mii->mii_media.ifm_cur;
4291 mtmp = ifm->ifm_media;
4292 ifm->ifm_media = IFM_ETHER | IFM_NONE;
4293 mii_mediachg(mii);
4294 ifm->ifm_media = mtmp;
4295 }
4296 ifp->if_flags = itmp;
4297 }
4298
4299 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4300
4301 /* Clear MAC's link state (PHY may still have link UP). */
4302 if (bootverbose && sc->bge_link)
4303 if_printf(sc->bge_ifp, "link DOWN\n");
4304 sc->bge_link = 0;
4305
4306 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4307}
4308
4309/*
4310 * Stop all chip I/O so that the kernel's probe routines don't
4311 * get confused by errant DMAs when rebooting.
4312 */
4313static int
4314bge_shutdown(device_t dev)
4315{
4316 struct bge_softc *sc;
4317
4318 sc = device_get_softc(dev);
4319 BGE_LOCK(sc);
4320 bge_stop(sc);
4321 bge_reset(sc);
4322 BGE_UNLOCK(sc);
4323
4324 return (0);
4325}
4326
4327static int
4328bge_suspend(device_t dev)
4329{
4330 struct bge_softc *sc;
4331
4332 sc = device_get_softc(dev);
4333 BGE_LOCK(sc);
4334 bge_stop(sc);
4335 BGE_UNLOCK(sc);
4336
4337 return (0);
4338}
4339
4340static int
4341bge_resume(device_t dev)
4342{
4343 struct bge_softc *sc;
4344 struct ifnet *ifp;
4345
4346 sc = device_get_softc(dev);
4347 BGE_LOCK(sc);
4348 ifp = sc->bge_ifp;
4349 if (ifp->if_flags & IFF_UP) {
4350 bge_init_locked(sc);
4351 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4352 bge_start_locked(ifp);
4353 }
4354 BGE_UNLOCK(sc);
4355
4356 return (0);
4357}
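/*
 * Editor's note: bge_shutdown(), bge_suspend() and bge_resume() are
 * assumed to be registered in the driver's method table earlier in
 * this file, roughly:
 *
 *	DEVMETHOD(device_shutdown,	bge_shutdown),
 *	DEVMETHOD(device_suspend,	bge_suspend),
 *	DEVMETHOD(device_resume,	bge_resume),
 *
 * so newbus invokes them at system shutdown and across suspend/resume.
 */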
4358
4359static void
4360bge_link_upd(struct bge_softc *sc)
4361{
4362 struct mii_data *mii;
4363 uint32_t link, status;
4364
4365 BGE_LOCK_ASSERT(sc);
4366
4367 /* Clear 'pending link event' flag. */
4368 sc->bge_link_evt = 0;
4369
4370 /*
4371 * Process link state changes.
4372 * Grrr. The link status word in the status block does
4373 * not work correctly on the BCM5700 rev AX and BX chips,
4374 * according to all available information. Hence, we have
4375 * to enable MII interrupts in order to properly obtain
4376 * async link changes. Unfortunately, this also means that
4377 * we have to read the MAC status register to detect link
4378 * changes, thereby adding an additional register access to
4379 * the interrupt handler.
4380 *
4381	 * XXX: perhaps the link state detection procedure used for
4382	 * BGE_CHIPID_BCM5700_B2 could be used for other BCM5700 revisions.
4383 */
4384
4385 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4386 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
4387 status = CSR_READ_4(sc, BGE_MAC_STS);
4388 if (status & BGE_MACSTAT_MI_INTERRUPT) {
4389 mii = device_get_softc(sc->bge_miibus);
4390 mii_pollstat(mii);
4391 if (!sc->bge_link &&
4392 mii->mii_media_status & IFM_ACTIVE &&
4393 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4394 sc->bge_link++;
4395 if (bootverbose)
4396 if_printf(sc->bge_ifp, "link UP\n");
4397 } else if (sc->bge_link &&
4398 (!(mii->mii_media_status & IFM_ACTIVE) ||
4399 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4400 sc->bge_link = 0;
4401 if (bootverbose)
4402 if_printf(sc->bge_ifp, "link DOWN\n");
4403 }
4404
4405 /* Clear the interrupt. */
4406 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4407 BGE_EVTENB_MI_INTERRUPT);
4408 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4409 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
4410 BRGPHY_INTRS);
4411 }
4412 return;
4413 }
4414
4415 if (sc->bge_flags & BGE_FLAG_TBI) {
4416 status = CSR_READ_4(sc, BGE_MAC_STS);
4417 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4418 if (!sc->bge_link) {
4419 sc->bge_link++;
4420 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
4421 BGE_CLRBIT(sc, BGE_MAC_MODE,
4422 BGE_MACMODE_TBI_SEND_CFGS);
4423 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4424 if (bootverbose)
4425 if_printf(sc->bge_ifp, "link UP\n");
4426 if_link_state_change(sc->bge_ifp,
4427 LINK_STATE_UP);
4428 }
4429 } else if (sc->bge_link) {
4430 sc->bge_link = 0;
4431 if (bootverbose)
4432 if_printf(sc->bge_ifp, "link DOWN\n");
4433 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
4434 }
4435 } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
4436 /*
4437		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
4438		 * bit in the status word always set.  Work around this bug by
4439		 * reading the PHY link status directly.
4440 */
4441 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
4442
4443 if (link != sc->bge_link ||
4444 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
4445 mii = device_get_softc(sc->bge_miibus);
4446 mii_pollstat(mii);
4447 if (!sc->bge_link &&
4448 mii->mii_media_status & IFM_ACTIVE &&
4449 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4450 sc->bge_link++;
4451 if (bootverbose)
4452 if_printf(sc->bge_ifp, "link UP\n");
4453 } else if (sc->bge_link &&
4454 (!(mii->mii_media_status & IFM_ACTIVE) ||
4455 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4456 sc->bge_link = 0;
4457 if (bootverbose)
4458 if_printf(sc->bge_ifp, "link DOWN\n");
4459 }
4460 }
4461 } else {
4462 /*
4463 * Discard link events for MII/GMII controllers
4464 * if MI auto-polling is disabled.
4465 */
4466 }
4467
4468 /* Clear the attention. */
4469 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4470 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4471 BGE_MACSTAT_LINK_CHANGED);
4472}
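/*
 * Editor's summary of the three detection paths above:
 *   1. BCM5700 pre-B2:   MI interrupt (BGE_MACSTAT_MI_INTERRUPT) plus
 *                        mii_pollstat(), since the status block's link
 *                        bit is unreliable on those revisions.
 *   2. TBI (fiber):      BGE_MACSTAT_TBI_PCS_SYNCHED in BGE_MAC_STS.
 *   3. MII w/ autopoll:  BGE_MISTS_LINK in BGE_MI_STS, read directly
 *                        to dodge the stuck LINKSTATE_CHANGED bit.
 */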
4473
4474#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
4475 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
4476 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
4477 desc)
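/*
 * Editor's note -- for illustration, a typical use such as
 *
 *	BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
 *	    children, ifInErrors, "InputErrors");
 *
 * expands to (sketch):
 *
 *	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "InputErrors",
 *	    CTLTYPE_UINT | CTLFLAG_RD, sc,
 *	    offsetof(struct bge_stats, ifInErrors),
 *	    bge_sysctl_stats, "IU", "Input Errors");
 *
 * i.e. the statistic's offset within struct bge_stats is passed to the
 * handler through arg2.
 */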
4478
4479static void
4480bge_add_sysctls(struct bge_softc *sc)
4481{
4482 struct sysctl_ctx_list *ctx;
4483 struct sysctl_oid_list *children, *schildren;
4484 struct sysctl_oid *tree;
4485
4486 ctx = device_get_sysctl_ctx(sc->bge_dev);
4487 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
4488
4489#ifdef BGE_REGISTER_DEBUG
4490 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
4491 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
4492 "Debug Information");
4493
4494 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
4495 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
4496 "Register Read");
4497
4498 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
4499 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
4500 "Memory Read");
4501
4502#endif
4503
4504 if (BGE_IS_5705_PLUS(sc))
4505 return;
4506
4507 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4508 NULL, "BGE Statistics");
4509 schildren = children = SYSCTL_CHILDREN(tree);
4510 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
4511 children, COSFramesDroppedDueToFilters,
4512 "FramesDroppedDueToFilters");
4513 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
4514 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
4515 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
4516 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
4517 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
4518 children, nicNoMoreRxBDs, "NoMoreRxBDs");
4519 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
4520 children, ifInDiscards, "InputDiscards");
4521 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
4522 children, ifInErrors, "InputErrors");
4523 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
4524 children, nicRecvThresholdHit, "RecvThresholdHit");
4525 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
4526 children, nicDmaReadQueueFull, "DmaReadQueueFull");
4527 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
4528 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
4529 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
4530 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
4531 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
4532 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
4533 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
4534 children, nicRingStatusUpdate, "RingStatusUpdate");
4535 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
4536 children, nicInterrupts, "Interrupts");
4537 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
4538 children, nicAvoidedInterrupts, "AvoidedInterrupts");
4539 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
4540 children, nicSendThresholdHit, "SendThresholdHit");
4541
4542 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
4543 NULL, "BGE RX Statistics");
4544 children = SYSCTL_CHILDREN(tree);
4545 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
4546 children, rxstats.ifHCInOctets, "Octets");
4547 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
4548 children, rxstats.etherStatsFragments, "Fragments");
4549 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
4550 children, rxstats.ifHCInUcastPkts, "UcastPkts");
4551 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
4552 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
4553 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
4554 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
4555 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
4556 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
4557 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
4558 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
4559 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
4560 children, rxstats.xoffPauseFramesReceived,
4561 "xoffPauseFramesReceived");
4562 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
4563 children, rxstats.macControlFramesReceived,
4564 "ControlFramesReceived");
4565 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
4566 children, rxstats.xoffStateEntered, "xoffStateEntered");
4567 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
4568 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
4569 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
4570 children, rxstats.etherStatsJabbers, "Jabbers");
4571 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
4572 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
4573 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
4574 children, rxstats.inRangeLengthError, "inRangeLengthError");
4575 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
4576 children, rxstats.outRangeLengthError, "outRangeLengthError");
4577
4578 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
4579 NULL, "BGE TX Statistics");
4580 children = SYSCTL_CHILDREN(tree);
4581 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
4582 children, txstats.ifHCOutOctets, "Octets");
4583 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
4584 children, txstats.etherStatsCollisions, "Collisions");
4585 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
4586 children, txstats.outXonSent, "XonSent");
4587 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
4588 children, txstats.outXoffSent, "XoffSent");
4589 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
4590 children, txstats.flowControlDone, "flowControlDone");
4591 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
4592 children, txstats.dot3StatsInternalMacTransmitErrors,
4593 "InternalMacTransmitErrors");
4594 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
4595 children, txstats.dot3StatsSingleCollisionFrames,
4596 "SingleCollisionFrames");
4597 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
4598 children, txstats.dot3StatsMultipleCollisionFrames,
4599 "MultipleCollisionFrames");
4600 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
4601 children, txstats.dot3StatsDeferredTransmissions,
4602 "DeferredTransmissions");
4603 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
4604 children, txstats.dot3StatsExcessiveCollisions,
4605 "ExcessiveCollisions");
4606 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
4607 children, txstats.dot3StatsLateCollisions,
4608 "LateCollisions");
4609 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
4610 children, txstats.ifHCOutUcastPkts, "UcastPkts");
4611 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
4612 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
4613 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
4614 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
4615 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
4616 children, txstats.dot3StatsCarrierSenseErrors,
4617 "CarrierSenseErrors");
4618 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
4619 children, txstats.ifOutDiscards, "Discards");
4620 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
4621 children, txstats.ifOutErrors, "Errors");
4622}
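/*
 * Editor's note -- once attached, the statistics surface under the
 * device's sysctl tree and can be inspected with sysctl(8), e.g.
 * (assuming unit 0):
 *
 *	# sysctl dev.bge.0.stats.rx.Octets
 *	# sysctl dev.bge.0.stats.tx.Collisions
 *
 * Note the early return above: on 5705-and-newer parts none of these
 * nodes are created.
 */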
4623
4624static int
4625bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
4626{
4627 struct bge_softc *sc;
4628 uint32_t result;
4629 int offset;
4630
4631 sc = (struct bge_softc *)arg1;
4632 offset = arg2;
4633 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
4634 offsetof(bge_hostaddr, bge_addr_lo));
4635 return (sysctl_handle_int(oidp, &result, 0, req));
4636}
4637
4638#ifdef BGE_REGISTER_DEBUG
4639static int
4640bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
4641{
4642 struct bge_softc *sc;
4643 uint16_t *sbdata;
4644 int error;
4645 int result;
4646 int i, j;
4647
4648 result = -1;
4649 error = sysctl_handle_int(oidp, &result, 0, req);
4650 if (error || (req->newptr == NULL))
4651 return (error);
4652
4653 if (result == 1) {
4654 sc = (struct bge_softc *)arg1;
4655
4656 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
4657 printf("Status Block:\n");
4658 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
4659 printf("%06x:", i);
4660 for (j = 0; j < 8; j++) {
4661 printf(" %04x", sbdata[i]);
4662 i += 4;
4663 }
4664 printf("\n");
4665 }
4666
4667 printf("Registers:\n");
4668 for (i = 0x800; i < 0xA00; ) {
4669 printf("%06x:", i);
4670 for (j = 0; j < 8; j++) {
4671 printf(" %08x", CSR_READ_4(sc, i));
4672 i += 4;
4673 }
4674 printf("\n");
4675 }
4676
4677 printf("Hardware Flags:\n");
4678 if (BGE_IS_575X_PLUS(sc))
4679 printf(" - 575X Plus\n");
4680 if (BGE_IS_5705_PLUS(sc))
4681 printf(" - 5705 Plus\n");
4682 if (BGE_IS_5714_FAMILY(sc))
4683 printf(" - 5714 Family\n");
4684 if (BGE_IS_5700_FAMILY(sc))
4685 printf(" - 5700 Family\n");
4686 if (sc->bge_flags & BGE_FLAG_JUMBO)
4687 printf(" - Supports Jumbo Frames\n");
4688 if (sc->bge_flags & BGE_FLAG_PCIX)
4689 printf(" - PCI-X Bus\n");
4690 if (sc->bge_flags & BGE_FLAG_PCIE)
4691 printf(" - PCI Express Bus\n");
4692 if (sc->bge_flags & BGE_FLAG_NO_3LED)
4693 printf(" - No 3 LEDs\n");
4694 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
4695 printf(" - RX Alignment Bug\n");
4696 }
4697
4698 return (error);
4699}
4700
4701static int
4702bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
4703{
4704 struct bge_softc *sc;
4705 int error;
4706	int result;	/* sysctl_handle_int() loads/stores a full int */
4707 uint32_t val;
4708
4709 result = -1;
4710 error = sysctl_handle_int(oidp, &result, 0, req);
4711 if (error || (req->newptr == NULL))
4712 return (error);
4713
4714	if (result >= 0 && result < 0x8000) {
4715 sc = (struct bge_softc *)arg1;
4716 val = CSR_READ_4(sc, result);
4717 printf("reg 0x%06X = 0x%08X\n", result, val);
4718 }
4719
4720 return (error);
4721}
4722
4723static int
4724bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
4725{
4726 struct bge_softc *sc;
4727 int error;
4728	int result;	/* sysctl_handle_int() loads/stores a full int */
4729 uint32_t val;
4730
4731 result = -1;
4732 error = sysctl_handle_int(oidp, &result, 0, req);
4733 if (error || (req->newptr == NULL))
4734 return (error);
4735
4736	if (result >= 0 && result < 0x8000) {
4737 sc = (struct bge_softc *)arg1;
4738 val = bge_readmem_ind(sc, result);
4739 printf("mem 0x%06X = 0x%08X\n", result, val);
4740 }
4741
4742 return (error);
4743}
4744#endif
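/*
 * Editor's note -- when built with BGE_REGISTER_DEBUG, the handlers
 * above are triggered by writing to their sysctl nodes, e.g.
 * (assuming unit 0; the offsets are illustrative):
 *
 *	# sysctl dev.bge.0.debug_info=1		# dump status block/regs
 *	# sysctl dev.bge.0.reg_read=0x400	# print one register
 *	# sysctl dev.bge.0.mem_read=0xc14	# print one memory word
 */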
4745
4746static int
4747bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
4748{
4749
4750 if (sc->bge_flags & BGE_FLAG_EADDR)
4751 return (1);
4752
4753#ifdef __sparc64__
4754 OF_getetheraddr(sc->bge_dev, ether_addr);
4755 return (0);
4756#endif
4757 return (1);
4758}
4759
4760static int
4761bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
4762{
4763 uint32_t mac_addr;
4764
4765 mac_addr = bge_readmem_ind(sc, 0x0c14);
4766 if ((mac_addr >> 16) == 0x484b) {
4767 ether_addr[0] = (uint8_t)(mac_addr >> 8);
4768 ether_addr[1] = (uint8_t)mac_addr;
4769 mac_addr = bge_readmem_ind(sc, 0x0c18);
4770 ether_addr[2] = (uint8_t)(mac_addr >> 24);
4771 ether_addr[3] = (uint8_t)(mac_addr >> 16);
4772 ether_addr[4] = (uint8_t)(mac_addr >> 8);
4773 ether_addr[5] = (uint8_t)mac_addr;
4774 return (0);
4775 }
4776 return (1);
4777}
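/*
 * Editor's worked example (hypothetical values): with the word at
 * 0x0c14 reading 0x484b0010 (the "HK" signature in the top half) and
 * the word at 0x0c18 reading 0x18a1b2c3, the code above recovers the
 * station address 00:10:18:a1:b2:c3.
 */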
4778
4779static int
4780bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
4781{
4782 int mac_offset = BGE_EE_MAC_OFFSET;
4783
4784 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
4785 mac_offset = BGE_EE_MAC_OFFSET_5906;
4786
4787 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
4788 ETHER_ADDR_LEN));
4789}
4790
4791static int
4792bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
4793{
4794
4795 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
4796 return (1);
4797
4798 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4799 ETHER_ADDR_LEN));
4800}
4801
4802static int
4803bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
4804{
4805 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
4806 /* NOTE: Order is critical */
4807 bge_get_eaddr_fw,
4808 bge_get_eaddr_mem,
4809 bge_get_eaddr_nvram,
4810 bge_get_eaddr_eeprom,
4811 NULL
4812 };
4813 const bge_eaddr_fcn_t *func;
4814
4815 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
4816 if ((*func)(sc, eaddr) == 0)
4817 break;
4818 }
4819 return (*func == NULL ? ENXIO : 0);
4820}
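/*
 * Editor's note: bge_eaddr_fcn_t is assumed to be declared earlier in
 * this file along the lines of
 *
 *	typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
 *
 * with each probe returning 0 on success and nonzero to fall through,
 * so the table above encodes the preference order: firmware, shared
 * memory, NVRAM, then EEPROM.
 */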