if_bge.c (164769) if_bge.c (164780)
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 164769 2006-11-30 13:40:39Z glebius $");
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 164780 2006-12-01 01:08:52Z jkim $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83
84#include <net/if.h>
85#include <net/if_arp.h>
86#include <net/ethernet.h>
87#include <net/if_dl.h>
88#include <net/if_media.h>
89
90#include <net/bpf.h>
91
92#include <net/if_types.h>
93#include <net/if_vlan_var.h>
94
95#include <netinet/in_systm.h>
96#include <netinet/in.h>
97#include <netinet/ip.h>
98
99#include <machine/bus.h>
100#include <machine/resource.h>
101#include <sys/bus.h>
102#include <sys/rman.h>
103
104#include <dev/mii/mii.h>
105#include <dev/mii/miivar.h>
106#include "miidevs.h"
107#include <dev/mii/brgphyreg.h>
108
109#include <dev/pci/pcireg.h>
110#include <dev/pci/pcivar.h>
111
112#include <dev/bge/if_bgereg.h>
113
114#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
115#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
116
117MODULE_DEPEND(bge, pci, 1, 1, 1);
118MODULE_DEPEND(bge, ether, 1, 1, 1);
119MODULE_DEPEND(bge, miibus, 1, 1, 1);
120
121/* "device miibus" required. See GENERIC if you get errors here. */
122#include "miibus_if.h"
123
124/*
125 * Various supported device vendors/types and their names. Note: the
126 * spec seems to indicate that the hardware still has Alteon's vendor
 127 * ID burned into it, though it will always be overridden by the vendor
128 * ID in the EEPROM. Just to be safe, we cover all possibilities.
129 */
130static struct bge_type {
131 uint16_t bge_vid;
132 uint16_t bge_did;
133} bge_devs[] = {
134 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
135 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
136
137 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
138 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
139 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
140
141 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
142
143 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
144 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
145 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
146 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
147 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
148 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
149 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
150 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
191
192 { SK_VENDORID, SK_DEVICEID_ALTIMA },
193
194 { TC_VENDORID, TC_DEVICEID_3C996 },
195
196 { 0, 0 }
197};
198
199static const struct bge_vendor {
200 uint16_t v_id;
201 const char *v_name;
202} bge_vendors[] = {
203 { ALTEON_VENDORID, "Alteon" },
204 { ALTIMA_VENDORID, "Altima" },
205 { APPLE_VENDORID, "Apple" },
206 { BCOM_VENDORID, "Broadcom" },
207 { SK_VENDORID, "SysKonnect" },
208 { TC_VENDORID, "3Com" },
209
210 { 0, NULL }
211};
212
213static const struct bge_revision {
214 uint32_t br_chipid;
215 const char *br_name;
216} bge_revisions[] = {
217 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
218 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
219 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
220 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
221 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
222 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
223 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
224 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
225 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
226 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
227 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
228 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
229 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
230 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
231 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
232 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
233 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
234 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
235 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
236 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
237 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
238 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
239 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
240 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
241 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
242 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
243 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
244 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
245 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
246 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
247 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
248 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
249 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
250 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
251 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
252 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
253 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
254 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
255 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
256 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
257 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
258 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
259
260 { 0, NULL }
261};
262
263/*
264 * Some defaults for major revisions, so that newer steppings
265 * that we don't know about have a shot at working.
266 */
267static const struct bge_revision bge_majorrevs[] = {
268 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
269 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
270 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
271 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
272 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
273 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
274 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
275 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
276 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
277 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
278 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
279 { BGE_ASICREV_BCM5787, "unknown BCM5787" },
280
281 { 0, NULL }
282};
283
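/*
 * Chip-family classification helpers. The 5705 and later chips have a
 * reduced feature set (no external SSRAM, fewer rings, a different
 * statistics mechanism), so several code paths below key off these tests.
 */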
284#define BGE_IS_5705_OR_BEYOND(sc) \
285 ((sc)->bge_asicrev == BGE_ASICREV_BCM5705 || \
286 (sc)->bge_asicrev == BGE_ASICREV_BCM5750 || \
287 (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \
288 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \
289 (sc)->bge_asicrev == BGE_ASICREV_BCM5714 || \
290 (sc)->bge_asicrev == BGE_ASICREV_BCM5752 || \
291 (sc)->bge_asicrev == BGE_ASICREV_BCM5755 || \
292 (sc)->bge_asicrev == BGE_ASICREV_BCM5787)
293
294#define BGE_IS_575X_PLUS(sc) \
295 ((sc)->bge_asicrev == BGE_ASICREV_BCM5750 || \
296 (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \
297 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \
298 (sc)->bge_asicrev == BGE_ASICREV_BCM5714 || \
299 (sc)->bge_asicrev == BGE_ASICREV_BCM5752 || \
300 (sc)->bge_asicrev == BGE_ASICREV_BCM5755 || \
301 (sc)->bge_asicrev == BGE_ASICREV_BCM5787)
302
303#define BGE_IS_5714_FAMILY(sc) \
304 ((sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \
305 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \
306 (sc)->bge_asicrev == BGE_ASICREV_BCM5714)
307
308#define BGE_IS_JUMBO_CAPABLE(sc) \
309 ((sc)->bge_asicrev == BGE_ASICREV_BCM5700 || \
310 (sc)->bge_asicrev == BGE_ASICREV_BCM5701 || \
311 (sc)->bge_asicrev == BGE_ASICREV_BCM5703 || \
312 (sc)->bge_asicrev == BGE_ASICREV_BCM5704)
313
314const struct bge_revision * bge_lookup_rev(uint32_t);
315const struct bge_vendor * bge_lookup_vendor(uint16_t);
316static int bge_probe(device_t);
317static int bge_attach(device_t);
318static int bge_detach(device_t);
319static int bge_suspend(device_t);
320static int bge_resume(device_t);
321static void bge_release_resources(struct bge_softc *);
322static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
323static int bge_dma_alloc(device_t);
324static void bge_dma_free(struct bge_softc *);
325
326static void bge_txeof(struct bge_softc *);
327static void bge_rxeof(struct bge_softc *);
328
329static void bge_asf_driver_up (struct bge_softc *);
330static void bge_tick(void *);
331static void bge_stats_update(struct bge_softc *);
332static void bge_stats_update_regs(struct bge_softc *);
333static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
334
335static void bge_intr(void *);
336static void bge_start_locked(struct ifnet *);
337static void bge_start(struct ifnet *);
338static int bge_ioctl(struct ifnet *, u_long, caddr_t);
339static void bge_init_locked(struct bge_softc *);
340static void bge_init(void *);
341static void bge_stop(struct bge_softc *);
342static void bge_watchdog(struct bge_softc *);
343static void bge_shutdown(device_t);
344static int bge_ifmedia_upd_locked(struct ifnet *);
345static int bge_ifmedia_upd(struct ifnet *);
346static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
347
348static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
349static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
350
351static void bge_setpromisc(struct bge_softc *);
352static void bge_setmulti(struct bge_softc *);
353
354static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
355static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
356static int bge_init_rx_ring_std(struct bge_softc *);
357static void bge_free_rx_ring_std(struct bge_softc *);
358static int bge_init_rx_ring_jumbo(struct bge_softc *);
359static void bge_free_rx_ring_jumbo(struct bge_softc *);
360static void bge_free_tx_ring(struct bge_softc *);
361static int bge_init_tx_ring(struct bge_softc *);
362
363static int bge_chipinit(struct bge_softc *);
364static int bge_blockinit(struct bge_softc *);
365
366static uint32_t bge_readmem_ind(struct bge_softc *, int);
367static void bge_writemem_ind(struct bge_softc *, int, int);
368#ifdef notdef
369static uint32_t bge_readreg_ind(struct bge_softc *, int);
370#endif
371static void bge_writereg_ind(struct bge_softc *, int, int);
372
373static int bge_miibus_readreg(device_t, int, int);
374static int bge_miibus_writereg(device_t, int, int, int);
375static void bge_miibus_statchg(device_t);
376#ifdef DEVICE_POLLING
377static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
378#endif
379
380#define BGE_RESET_START 1
381#define BGE_RESET_STOP 2
382static void bge_sig_post_reset(struct bge_softc *, int);
383static void bge_sig_legacy(struct bge_softc *, int);
384static void bge_sig_pre_reset(struct bge_softc *, int);
385static int bge_reset(struct bge_softc *);
386static void bge_link_upd(struct bge_softc *);
387
388static device_method_t bge_methods[] = {
389 /* Device interface */
390 DEVMETHOD(device_probe, bge_probe),
391 DEVMETHOD(device_attach, bge_attach),
392 DEVMETHOD(device_detach, bge_detach),
393 DEVMETHOD(device_shutdown, bge_shutdown),
394 DEVMETHOD(device_suspend, bge_suspend),
395 DEVMETHOD(device_resume, bge_resume),
396
397 /* bus interface */
398 DEVMETHOD(bus_print_child, bus_generic_print_child),
399 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
400
401 /* MII interface */
402 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
403 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
404 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
405
406 { 0, 0 }
407};
408
409static driver_t bge_driver = {
410 "bge",
411 bge_methods,
412 sizeof(struct bge_softc)
413};
414
415static devclass_t bge_devclass;
416
417DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
418DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
419
420static int bge_fake_autoneg = 0;
421static int bge_allow_asf = 1;
422
423TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
424TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
425
426SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
427SYSCTL_INT(_hw_bge, OID_AUTO, fake_autoneg, CTLFLAG_RD, &bge_fake_autoneg, 0,
428 "Enable fake autonegotiation for certain blade systems");
429SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
430 "Allow ASF mode if available");
431
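/*
 * Indirect access to NIC-internal memory: point the memory window at the
 * desired offset via the BGE_PCI_MEMWIN_BASEADDR config register, then
 * read or write the word through BGE_PCI_MEMWIN_DATA. This goes through
 * PCI config space rather than the memory-mapped BAR.
 */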
432static uint32_t
433bge_readmem_ind(struct bge_softc *sc, int off)
434{
435 device_t dev;
436
437 dev = sc->bge_dev;
438
439 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
440 return (pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
441}
442
443static void
444bge_writemem_ind(struct bge_softc *sc, int off, int val)
445{
446 device_t dev;
447
448 dev = sc->bge_dev;
449
450 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
451 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
452}
453
454#ifdef notdef
455static uint32_t
456bge_readreg_ind(struct bge_softc *sc, int off)
457{
458 device_t dev;
459
460 dev = sc->bge_dev;
461
462 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
463 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
464}
465#endif
466
467static void
468bge_writereg_ind(struct bge_softc *sc, int off, int val)
469{
470 device_t dev;
471
472 dev = sc->bge_dev;
473
474 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
475 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
476}
477
478/*
479 * Map a single buffer address.
480 */
481
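/*
 * bus_dmamap_load() callback: record the single segment's bus address in
 * the caller's bge_dmamap_arg. A mapping that needs more segments than
 * the caller allowed is reported by clearing bge_maxsegs.
 */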
482static void
483bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
484{
485 struct bge_dmamap_arg *ctx;
486
487 if (error)
488 return;
489
490 ctx = arg;
491
492 if (nseg > ctx->bge_maxsegs) {
493 ctx->bge_maxsegs = 0;
494 return;
495 }
496
497 ctx->bge_busaddr = segs->ds_addr;
498}
499
500/*
501 * Read a byte of data stored in the EEPROM at address 'addr.' The
502 * BCM570x supports both the traditional bitbang interface and an
503 * auto access interface for reading the EEPROM. We use the auto
504 * access method.
505 */
506static uint8_t
507bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
508{
509 int i;
510 uint32_t byte = 0;
511
512 /*
513 * Enable use of auto EEPROM access so we can avoid
514 * having to use the bitbang method.
515 */
516 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
517
518 /* Reset the EEPROM, load the clock period. */
519 CSR_WRITE_4(sc, BGE_EE_ADDR,
520 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
521 DELAY(20);
522
523 /* Issue the read EEPROM command. */
524 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
525
526 /* Wait for completion */
527 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
528 DELAY(10);
529 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
530 break;
531 }
532
 533 if (i == BGE_TIMEOUT * 10) {
534 device_printf(sc->bge_dev, "EEPROM read timed out\n");
535 return (1);
536 }
537
538 /* Get result. */
539 byte = CSR_READ_4(sc, BGE_EE_DATA);
540
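	/*
	 * The data register returns the full 32-bit word containing the
	 * requested byte; the low two bits of the address select the byte
	 * within that word (e.g. addr 6 yields bits 23:16 of the word at
	 * offset 4).
	 */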
541 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
542
543 return (0);
544}
545
546/*
547 * Read a sequence of bytes from the EEPROM.
548 */
549static int
550bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
551{
552 int i, error = 0;
553 uint8_t byte = 0;
554
555 for (i = 0; i < cnt; i++) {
556 error = bge_eeprom_getbyte(sc, off + i, &byte);
557 if (error)
558 break;
559 *(dest + i) = byte;
560 }
561
562 return (error ? 1 : 0);
563}
564
565static int
566bge_miibus_readreg(device_t dev, int phy, int reg)
567{
568 struct bge_softc *sc;
569 uint32_t val, autopoll;
570 int i;
571
572 sc = device_get_softc(dev);
573
574 /*
575 * Broadcom's own driver always assumes the internal
576 * PHY is at GMII address 1. On some chips, the PHY responds
577 * to accesses at all addresses, which could cause us to
 578 * bogusly attach the PHY 32 times at probe time. Always
579 * restricting the lookup to address 1 is simpler than
 580 * trying to figure out which chip revisions should be
581 * special-cased.
582 */
583 if (phy != 1)
584 return (0);
585
586 /* Reading with autopolling on may trigger PCI errors */
587 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
588 if (autopoll & BGE_MIMODE_AUTOPOLL) {
589 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
590 DELAY(40);
591 }
592
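	/*
	 * Start the access by writing the PHY and register numbers along
	 * with the READ and BUSY bits, then poll until the controller
	 * clears BUSY to indicate completion.
	 */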
593 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
594 BGE_MIPHY(phy)|BGE_MIREG(reg));
595
596 for (i = 0; i < BGE_TIMEOUT; i++) {
597 val = CSR_READ_4(sc, BGE_MI_COMM);
598 if (!(val & BGE_MICOMM_BUSY))
599 break;
600 }
601
602 if (i == BGE_TIMEOUT) {
603 device_printf(sc->bge_dev, "PHY read timed out\n");
604 val = 0;
605 goto done;
606 }
607
608 val = CSR_READ_4(sc, BGE_MI_COMM);
609
610done:
611 if (autopoll & BGE_MIMODE_AUTOPOLL) {
612 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
613 DELAY(40);
614 }
615
616 if (val & BGE_MICOMM_READFAIL)
617 return (0);
618
619 return (val & 0xFFFF);
620}
621
622static int
623bge_miibus_writereg(device_t dev, int phy, int reg, int val)
624{
625 struct bge_softc *sc;
626 uint32_t autopoll;
627 int i;
628
629 sc = device_get_softc(dev);
630
 631 /* Writing with autopolling on may trigger PCI errors */
632 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
633 if (autopoll & BGE_MIMODE_AUTOPOLL) {
634 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
635 DELAY(40);
636 }
637
638 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
639 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
640
641 for (i = 0; i < BGE_TIMEOUT; i++) {
642 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
643 break;
644 }
645
646 if (autopoll & BGE_MIMODE_AUTOPOLL) {
647 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
648 DELAY(40);
649 }
650
651 if (i == BGE_TIMEOUT) {
 652 device_printf(sc->bge_dev, "PHY write timed out\n");
653 return (0);
654 }
655
656 return (0);
657}
658
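/*
 * miibus link-state callback: program the MAC port mode (GMII vs. MII)
 * and duplex setting to match the media the PHY negotiated.
 */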
659static void
660bge_miibus_statchg(device_t dev)
661{
662 struct bge_softc *sc;
663 struct mii_data *mii;
664 sc = device_get_softc(dev);
665 mii = device_get_softc(sc->bge_miibus);
666
667 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
668 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
669 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
670 else
671 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
672
673 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
674 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
675 else
676 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
677}
678
679/*
 680 * Initialize a standard receive ring descriptor.
681 */
682static int
683bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
684{
685 struct mbuf *m_new = NULL;
686 struct bge_rx_bd *r;
687 struct bge_dmamap_arg ctx;
688 int error;
689
690 if (m == NULL) {
691 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
692 if (m_new == NULL)
693 return (ENOBUFS);
694 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
695 } else {
696 m_new = m;
697 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
698 m_new->m_data = m_new->m_ext.ext_buf;
699 }
700
701 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
702 m_adj(m_new, ETHER_ALIGN);
703 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
704 r = &sc->bge_ldata.bge_rx_std_ring[i];
705 ctx.bge_maxsegs = 1;
706 ctx.sc = sc;
707 error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
708 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
709 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
710 if (error || ctx.bge_maxsegs == 0) {
711 if (m == NULL) {
712 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
713 m_freem(m_new);
714 }
715 return (ENOMEM);
716 }
717 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
718 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
719 r->bge_flags = BGE_RXBDFLAG_END;
720 r->bge_len = m_new->m_len;
721 r->bge_idx = i;
722
723 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
724 sc->bge_cdata.bge_rx_std_dmamap[i],
725 BUS_DMASYNC_PREREAD);
726
727 return (0);
728}
729
730/*
731 * Initialize a jumbo receive ring descriptor. This allocates
732 * a jumbo buffer from the pool managed internally by the driver.
733 */
734static int
735bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
736{
737 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
738 struct bge_extrx_bd *r;
739 struct mbuf *m_new = NULL;
740 int nsegs;
741 int error;
742
743 if (m == NULL) {
744 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
745 if (m_new == NULL)
746 return (ENOBUFS);
747
748 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
749 if (!(m_new->m_flags & M_EXT)) {
750 m_freem(m_new);
751 return (ENOBUFS);
752 }
753 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
754 } else {
755 m_new = m;
756 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
757 m_new->m_data = m_new->m_ext.ext_buf;
758 }
759
760 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
761 m_adj(m_new, ETHER_ALIGN);
762
763 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
764 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
765 m_new, segs, &nsegs, BUS_DMA_NOWAIT);
766 if (error) {
767 if (m == NULL)
768 m_freem(m_new);
769 return (error);
770 }
771 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
772
773 /*
774 * Fill in the extended RX buffer descriptor.
775 */
776 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
777 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
778 r->bge_idx = i;
779 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
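	/*
	 * An extended RX buffer descriptor can carry up to four DMA
	 * segments; fill them in from the last segment down (the switch
	 * cases intentionally fall through).
	 */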
780 switch (nsegs) {
781 case 4:
782 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
783 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
784 r->bge_len3 = segs[3].ds_len;
785 case 3:
786 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
787 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
788 r->bge_len2 = segs[2].ds_len;
789 case 2:
790 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
791 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
792 r->bge_len1 = segs[1].ds_len;
793 case 1:
794 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
795 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
796 r->bge_len0 = segs[0].ds_len;
797 break;
798 default:
799 panic("%s: %d segments\n", __func__, nsegs);
800 }
801
 802 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
803 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
804 BUS_DMASYNC_PREREAD);
805
806 return (0);
807}
808
809/*
810 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 811 * that's 1MB of memory, which is a lot. For now, we fill only the first
812 * 256 ring entries and hope that our CPU is fast enough to keep up with
813 * the NIC.
814 */
815static int
816bge_init_rx_ring_std(struct bge_softc *sc)
817{
818 int i;
819
820 for (i = 0; i < BGE_SSLOTS; i++) {
821 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
822 return (ENOBUFS);
 823 }
824
825 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
826 sc->bge_cdata.bge_rx_std_ring_map,
827 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
828
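	/* Advance the standard RX producer index so the chip sees the new buffers. */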
829 sc->bge_std = i - 1;
830 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
831
832 return (0);
833}
834
835static void
836bge_free_rx_ring_std(struct bge_softc *sc)
837{
838 int i;
839
840 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
841 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
842 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
843 sc->bge_cdata.bge_rx_std_dmamap[i],
844 BUS_DMASYNC_POSTREAD);
845 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
846 sc->bge_cdata.bge_rx_std_dmamap[i]);
847 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
848 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
849 }
850 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
851 sizeof(struct bge_rx_bd));
852 }
853}
854
855static int
856bge_init_rx_ring_jumbo(struct bge_softc *sc)
857{
858 struct bge_rcb *rcb;
859 int i;
860
861 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
862 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
863 return (ENOBUFS);
 864 }
865
866 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
867 sc->bge_cdata.bge_rx_jumbo_ring_map,
868 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
869
870 sc->bge_jumbo = i - 1;
871
872 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
873 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
874 BGE_RCB_FLAG_USE_EXT_RX_BD);
875 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
876
877 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
878
879 return (0);
880}
881
882static void
883bge_free_rx_ring_jumbo(struct bge_softc *sc)
884{
885 int i;
886
887 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
888 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
889 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
890 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
891 BUS_DMASYNC_POSTREAD);
892 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
893 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
894 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
895 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
896 }
897 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
898 sizeof(struct bge_extrx_bd));
899 }
900}
901
902static void
903bge_free_tx_ring(struct bge_softc *sc)
904{
905 int i;
906
907 if (sc->bge_ldata.bge_tx_ring == NULL)
908 return;
909
910 for (i = 0; i < BGE_TX_RING_CNT; i++) {
911 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
912 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
913 sc->bge_cdata.bge_tx_dmamap[i],
914 BUS_DMASYNC_POSTWRITE);
915 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
916 sc->bge_cdata.bge_tx_dmamap[i]);
917 m_freem(sc->bge_cdata.bge_tx_chain[i]);
918 sc->bge_cdata.bge_tx_chain[i] = NULL;
919 }
920 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
921 sizeof(struct bge_tx_bd));
922 }
923}
924
925static int
926bge_init_tx_ring(struct bge_softc *sc)
927{
928 sc->bge_txcnt = 0;
929 sc->bge_tx_saved_considx = 0;
930
931 /* Initialize transmit producer index for host-memory send ring. */
932 sc->bge_tx_prodidx = 0;
933 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
934
935 /* 5700 b2 errata */
936 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
937 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
938
939 /* NIC-memory send ring not used; initialize to zero. */
940 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
941 /* 5700 b2 errata */
942 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
943 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
944
945 return (0);
946}
947
948static void
949bge_setpromisc(struct bge_softc *sc)
950{
951 struct ifnet *ifp;
952
953 BGE_LOCK_ASSERT(sc);
954
955 ifp = sc->bge_ifp;
956
957 /*
958 * Enable or disable promiscuous mode as needed.
959 * Do not strip VLAN tag when promiscuous mode is enabled.
960 */
961 if (ifp->if_flags & IFF_PROMISC)
962 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC |
963 BGE_RXMODE_RX_KEEP_VLAN_DIAG);
964 else
965 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC |
966 BGE_RXMODE_RX_KEEP_VLAN_DIAG);
967}
968
969static void
970bge_setmulti(struct bge_softc *sc)
971{
972 struct ifnet *ifp;
973 struct ifmultiaddr *ifma;
974 uint32_t hashes[4] = { 0, 0, 0, 0 };
975 int h, i;
976
977 BGE_LOCK_ASSERT(sc);
978
979 ifp = sc->bge_ifp;
980
981 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
982 for (i = 0; i < 4; i++)
983 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
984 return;
985 }
986
987 /* First, zot all the existing filters. */
988 for (i = 0; i < 4; i++)
989 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
990
991 /* Now program new ones. */
992 IF_ADDR_LOCK(ifp);
993 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
994 if (ifma->ifma_addr->sa_family != AF_LINK)
995 continue;
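		/*
		 * Hash the address with CRC32 and keep the low 7 bits,
		 * giving one of 128 filter bits: bits 6:5 select one of the
		 * four 32-bit BGE_MAR registers, bits 4:0 the bit within it.
		 */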
996 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
997 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
998 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
999 }
1000 IF_ADDR_UNLOCK(ifp);
1001
1002 for (i = 0; i < 4; i++)
1003 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1004}
1005
 1006static void
 1007bge_sig_pre_reset(struct bge_softc *sc, int type)
 1008{
1011 /*
 1012 * Some chips don't like this, so do it only if ASF is enabled.
1013 */
1014 if (sc->bge_asf_mode)
1015 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1016
1017 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1018 switch (type) {
1019 case BGE_RESET_START:
1020 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1021 break;
1022 case BGE_RESET_STOP:
1023 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1024 break;
1025 }
1026 }
1027}
1028
 1029static void
 1030bge_sig_post_reset(struct bge_softc *sc, int type)
 1031{
1034 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1035 switch (type) {
1036 case BGE_RESET_START:
1037 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1038 /* START DONE */
1039 break;
1040 case BGE_RESET_STOP:
1041 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1042 break;
1043 }
1044 }
1045}
1046
 1047static void
 1048bge_sig_legacy(struct bge_softc *sc, int type)
 1049{
1052 if (sc->bge_asf_mode) {
1053 switch (type) {
1054 case BGE_RESET_START:
1055 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1056 break;
1057 case BGE_RESET_STOP:
1058 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1059 break;
1060 }
1061 }
1062}
1063
 1064void bge_stop_fw(struct bge_softc *);
 1065void
 1066bge_stop_fw(struct bge_softc *sc)
 1067{
1069 int i;
1070
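	/*
	 * Ask the ASF firmware to pause: post BGE_FW_PAUSE in the general
	 * communication mailbox, ring the CPU event bit, and wait briefly
	 * for the firmware to acknowledge by clearing it.
	 */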
1071 if (sc->bge_asf_mode) {
1072 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1073 CSR_WRITE_4(sc, BGE_CPU_EVENT,
 1074 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1075
1076 for (i = 0; i < 100; i++ ) {
1077 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1078 break;
1079 DELAY(10);
1080 }
1081 }
1082}
1083
1084/*
1085 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1086 * self-test results.
1087 */
1088static int
1089bge_chipinit(struct bge_softc *sc)
1090{
1091 uint32_t dma_rw_ctl;
1092 int i;
1093
1094 /* Set endianness before we access any non-PCI registers. */
1095 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1096
1097 /*
1098 * Check the 'ROM failed' bit on the RX CPU to see if
1099 * self-tests passed.
1100 */
1101 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1102 device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
1103 return (ENODEV);
1104 }
1105
1106 /* Clear the MAC control register */
1107 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1108
1109 /*
1110 * Clear the MAC statistics block in the NIC's
1111 * internal memory.
1112 */
1113 for (i = BGE_STATS_BLOCK;
1114 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1115 BGE_MEMWIN_WRITE(sc, i, 0);
1116
1117 for (i = BGE_STATUS_BLOCK;
1118 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1119 BGE_MEMWIN_WRITE(sc, i, 0);
1120
1121 /* Set up the PCI DMA control register. */
1122 if (sc->bge_flags & BGE_FLAG_PCIE) {
1123 /* PCI Express bus */
1124 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1125 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1126 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1127 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1128 /* PCI-X bus */
1129 if (BGE_IS_5714_FAMILY(sc)) {
1130 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
1131 dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
1132 /* XXX magic values, Broadcom-supplied Linux driver */
1133 if (sc->bge_asicrev == BGE_ASICREV_BCM5780)
1134 dma_rw_ctl |= (1 << 20) | (1 << 18) |
1135 BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1136 else
1137 dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
1138
1139 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1140 /*
1141 * The 5704 uses a different encoding of read/write
1142 * watermarks.
1143 */
1144 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1145 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1146 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1147 else
1148 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1149 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1150 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1151 (0x0F);
1152
1153 /*
1154 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1155 * for hardware bugs.
1156 */
1157 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1158 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1159 uint32_t tmp;
1160
1161 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1162 if (tmp == 0x6 || tmp == 0x7)
1163 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1164 }
1165 } else
1166 /* Conventional PCI bus */
1167 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1168 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1169 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1170 (0x0F);
1171
1172 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1173 sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1174 sc->bge_asicrev == BGE_ASICREV_BCM5705)
1175 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1176 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1177
1178 /*
1179 * Set up general mode register.
1180 */
1181 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1182 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1183 BGE_MODECTL_TX_NO_PHDR_CSUM);
1184
1185 /*
1186 * Tell the firmware the driver is running
1187 */
1188 if (sc->bge_asf_mode & ASF_STACKUP)
1189 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1190
1191 /*
1192 * Disable memory write invalidate. Apparently it is not supported
1193 * properly by these devices.
1194 */
1195 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1196
1197#ifdef __brokenalpha__
1198 /*
 1199 * Must ensure that we do not cross an 8K (byte) boundary
1200 * for DMA reads. Our highest limit is 1K bytes. This is a
1201 * restriction on some ALPHA platforms with early revision
1202 * 21174 PCI chipsets, such as the AlphaPC 164lx
1203 */
1204 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1205 BGE_PCI_READ_BNDRY_1024BYTES, 4);
1206#endif
1207
 1208 /* Set the timer prescaler (always 66MHz) */
1209 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1210
1211 return (0);
1212}
1213
1214static int
1215bge_blockinit(struct bge_softc *sc)
1216{
1217 struct bge_rcb *rcb;
1218 bus_size_t vrcb;
1219 bge_hostaddr taddr;
1220 int i;
1221
1222 /*
1223 * Initialize the memory window pointer register so that
1224 * we can access the first 32K of internal NIC RAM. This will
1225 * allow us to set up the TX send ring RCBs and the RX return
1226 * ring RCBs, plus other things which live in NIC memory.
1227 */
1228 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1229
1230 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1231
1232 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1233 /* Configure mbuf memory pool */
1234 if (sc->bge_flags & BGE_FLAG_EXTRAM) {
1235 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1236 BGE_EXT_SSRAM);
1237 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1238 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1239 else
1240 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1241 } else {
1242 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1243 BGE_BUFFPOOL_1);
1244 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1245 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1246 else
1247 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1248 }
1249
1250 /* Configure DMA resource pool */
1251 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1252 BGE_DMA_DESCRIPTORS);
1253 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1254 }
1255
1256 /* Configure mbuf pool watermarks */
1257 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1258 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1259 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1260 } else {
1261 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1262 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1263 }
1264 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1265
1266 /* Configure DMA resource watermarks */
1267 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1268 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1269
1270 /* Enable buffer manager */
1271 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1272 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1273 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1274
1275 /* Poll for buffer manager start indication */
1276 for (i = 0; i < BGE_TIMEOUT; i++) {
1277 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1278 break;
1279 DELAY(10);
1280 }
1281
1282 if (i == BGE_TIMEOUT) {
1283 device_printf(sc->bge_dev,
1284 "buffer manager failed to start\n");
1285 return (ENXIO);
1286 }
1287 }
1288
1289 /* Enable flow-through queues */
1290 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1291 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1292
1293 /* Wait until queue initialization is complete */
1294 for (i = 0; i < BGE_TIMEOUT; i++) {
1295 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1296 break;
1297 DELAY(10);
1298 }
1299
1300 if (i == BGE_TIMEOUT) {
1301 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1302 return (ENXIO);
1303 }
1304
1305 /* Initialize the standard RX ring control block */
1306 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1307 rcb->bge_hostaddr.bge_addr_lo =
1308 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1309 rcb->bge_hostaddr.bge_addr_hi =
1310 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1311 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1312 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1313 if (BGE_IS_5705_OR_BEYOND(sc))
1314 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1315 else
1316 rcb->bge_maxlen_flags =
1317 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1318 if (sc->bge_flags & BGE_FLAG_EXTRAM)
1319 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1320 else
1321 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1322 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1323 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1324
1325 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1326 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1327
1328 /*
1329 * Initialize the jumbo RX ring control block
1330 * We set the 'ring disabled' bit in the flags
1331 * field until we're actually ready to start
1332 * using this ring (i.e. once we set the MTU
1333 * high enough to require it).
1334 */
1335 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1336 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1337
1338 rcb->bge_hostaddr.bge_addr_lo =
1339 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1340 rcb->bge_hostaddr.bge_addr_hi =
1341 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1342 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1343 sc->bge_cdata.bge_rx_jumbo_ring_map,
1344 BUS_DMASYNC_PREREAD);
1345 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1346 BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
1347 if (sc->bge_flags & BGE_FLAG_EXTRAM)
1348 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1349 else
1350 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1351 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1352 rcb->bge_hostaddr.bge_addr_hi);
1353 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1354 rcb->bge_hostaddr.bge_addr_lo);
1355
1356 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1357 rcb->bge_maxlen_flags);
1358 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1359
1360 /* Set up dummy disabled mini ring RCB */
1361 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1362 rcb->bge_maxlen_flags =
1363 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1364 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1365 rcb->bge_maxlen_flags);
1366 }
1367
1368 /*
 1369 * Set the BD ring replenish thresholds. The recommended
1370 * values are 1/8th the number of descriptors allocated to
1371 * each ring.
1372 */
1373 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1374 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1375
1376 /*
1377 * Disable all unused send rings by setting the 'ring disabled'
1378 * bit in the flags field of all the TX send ring control blocks.
1379 * These are located in NIC memory.
1380 */
1381 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1382 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1383 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1384 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1385 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1386 vrcb += sizeof(struct bge_rcb);
1387 }
1388
1389 /* Configure TX RCB 0 (we use only the first ring) */
1390 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1391 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1392 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1393 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1394 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1395 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1396 if (!(BGE_IS_5705_OR_BEYOND(sc)))
1397 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1398 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1399
1400 /* Disable all unused RX return rings */
1401 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1402 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1403 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1404 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1405 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1406 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1407 BGE_RCB_FLAG_RING_DISABLED));
1408 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1409 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1410 (i * (sizeof(uint64_t))), 0);
1411 vrcb += sizeof(struct bge_rcb);
1412 }
1413
1414 /* Initialize RX ring indexes */
1415 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1416 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1417 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1418
1419 /*
1420 * Set up RX return ring 0
1421 * Note that the NIC address for RX return rings is 0x00000000.
1422 * The return rings live entirely within the host, so the
1423 * nicaddr field in the RCB isn't used.
1424 */
1425 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1426 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1427 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1428 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1429 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1430 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1431 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1432
1433 /* Set random backoff seed for TX */
1434 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1435 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1436 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1437 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1438 BGE_TX_BACKOFF_SEED_MASK);
1439
1440 /* Set inter-packet gap */
1441 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1442
1443 /*
1444 * Specify which ring to use for packets that don't match
1445 * any RX rules.
1446 */
1447 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1448
1449 /*
1450 * Configure number of RX lists. One interrupt distribution
1451 * list, sixteen active lists, one bad frames class.
1452 */
1453 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1454
 1455 /* Initialize RX list placement stats mask. */
1456 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1457 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1458
1459 /* Disable host coalescing until we get it set up */
1460 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1461
1462 /* Poll to make sure it's shut down. */
1463 for (i = 0; i < BGE_TIMEOUT; i++) {
1464 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1465 break;
1466 DELAY(10);
1467 }
1468
1469 if (i == BGE_TIMEOUT) {
1470 device_printf(sc->bge_dev,
1471 "host coalescing engine failed to idle\n");
1472 return (ENXIO);
1473 }
1474
1475 /* Set up host coalescing defaults */
1476 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1477 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1478 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1479 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1480 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1481 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1482 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1483 }
1484 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1485 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1486
1487 /* Set up address of statistics block */
1488 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1489 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1490 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1491 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1492 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1493 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1494 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1495 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1496 }
1497
1498 /* Set up address of status block */
1499 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1500 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1501 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1502 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1503 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1504 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1505
1506 /* Turn on host coalescing state machine */
1507 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1508
1509 /* Turn on RX BD completion state machine and enable attentions */
1510 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1511 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1512
1513 /* Turn on RX list placement state machine */
1514 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1515
1516 /* Turn on RX list selector state machine. */
1517 if (!(BGE_IS_5705_OR_BEYOND(sc)))
1518 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1519
1520 /* Turn on DMA, clear stats */
1521 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1522 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1523 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1524 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1525 ((sc->bge_flags & BGE_FLAG_TBI) ?
1526 BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1527
1528 /* Set misc. local control, enable interrupts on attentions */
1529 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1530
1531#ifdef notdef
1532 /* Assert GPIO pins for PHY reset */
1533 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1534 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1535 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1536 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1537#endif
1538
1539 /* Turn on DMA completion state machine */
1540 if (!(BGE_IS_5705_OR_BEYOND(sc)))
1541 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1542
1543 /* Turn on write DMA state machine */
1544 CSR_WRITE_4(sc, BGE_WDMA_MODE,
1545 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1546
1547 /* Turn on read DMA state machine */
1548 CSR_WRITE_4(sc, BGE_RDMA_MODE,
1549 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1550
1551 /* Turn on RX data completion state machine */
1552 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1553
1554 /* Turn on RX BD initiator state machine */
1555 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1556
1557 /* Turn on RX data and RX BD initiator state machine */
1558 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1559
1560 /* Turn on Mbuf cluster free state machine */
1561 if (!(BGE_IS_5705_OR_BEYOND(sc)))
1562 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1563
1564 /* Turn on send BD completion state machine */
1565 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1566
1567 /* Turn on send data completion state machine */
1568 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1569
1570 /* Turn on send data initiator state machine */
1571 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1572
1573 /* Turn on send BD initiator state machine */
1574 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1575
1576 /* Turn on send BD selector state machine */
1577 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1578
1579 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1580 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1581 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1582
1583 /* ack/clear link change events */
1584 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1585 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1586 BGE_MACSTAT_LINK_CHANGED);
1587 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1588
1589 /* Enable PHY auto polling (for MII/GMII only) */
1590 if (sc->bge_flags & BGE_FLAG_TBI) {
1591 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1592 } else {
1593 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1594 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1595 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1596 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1597 BGE_EVTENB_MI_INTERRUPT);
1598 }
1599
1600 /*
1601 * Clear any pending link state attention.
1602 * Otherwise some link state change events may be lost until attention
1603 * is cleared by bge_intr() -> bge_link_upd() sequence.
1604 * It's not necessary on newer BCM chips - perhaps enabling link
1605 * state change attentions implies clearing pending attention.
1606 */
1607 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1608 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1609 BGE_MACSTAT_LINK_CHANGED);
1610
1611 /* Enable link state change attentions. */
1612 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1613
1614 return (0);
1615}
1616
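/*
 * Translate a chip ID into a printable revision name: try an exact match
 * first, then fall back to the major ASIC revision so unknown steppings
 * still get a reasonable name.
 */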
1617const struct bge_revision *
1618bge_lookup_rev(uint32_t chipid)
1619{
1620 const struct bge_revision *br;
1621
1622 for (br = bge_revisions; br->br_name != NULL; br++) {
1623 if (br->br_chipid == chipid)
1624 return (br);
1625 }
1626
1627 for (br = bge_majorrevs; br->br_name != NULL; br++) {
1628 if (br->br_chipid == BGE_ASICREV(chipid))
1629 return (br);
1630 }
1631
1632 return (NULL);
1633}
1634
1635const struct bge_vendor *
1636bge_lookup_vendor(uint16_t vid)
1637{
1638 const struct bge_vendor *v;
1639
1640 for (v = bge_vendors; v->v_name != NULL; v++)
1641 if (v->v_id == vid)
1642 return (v);
1643
1644 panic("%s: unknown vendor %d", __func__, vid);
1645 return (NULL);
1646}
1647
1648/*
1649 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1650 * against our list and return its name if we find a match.
1651 *
1652 * Note that since the Broadcom controller contains VPD support, we
1653 * can get the device name string from the controller itself instead
1654 * of the compiled-in string. This is a little slow, but it guarantees
1655 * we'll always announce the right product name. Unfortunately, this
1656 * is possible only later in bge_attach(), when we have established
1657 * access to EEPROM.
1658 */
1659static int
1660bge_probe(device_t dev)
1661{
1662 struct bge_type *t = bge_devs;
1663 struct bge_softc *sc = device_get_softc(dev);
1664
1665 bzero(sc, sizeof(struct bge_softc));
1666 sc->bge_dev = dev;
1667
1668 while (t->bge_vid != 0) {
1669 if ((pci_get_vendor(dev) == t->bge_vid) &&
1670 (pci_get_device(dev) == t->bge_did)) {
1671 char buf[64];
1672 const struct bge_revision *br;
1673 const struct bge_vendor *v;
1674 uint32_t id;
1675
1676 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1677 BGE_PCIMISCCTL_ASICREV;
1678 br = bge_lookup_rev(id);
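	/* The ASIC revision sits in the upper 16 bits; shift it down for display. */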
1679 id >>= 16;
1680 v = bge_lookup_vendor(t->bge_vid);
1681 if (br == NULL)
1682 snprintf(buf, 64, "%s unknown ASIC (%#04x)",
1683 v->v_name, id);
1684 else
1685 snprintf(buf, 64, "%s %s, ASIC rev. %#04x",
1686 v->v_name, br->br_name, id);
1687 device_set_desc_copy(dev, buf);
1688 if (pci_get_subvendor(dev) == DELL_VENDORID)
1689 sc->bge_flags |= BGE_FLAG_NO3LED;
1690 return (0);
1691 }
1692 t++;
1693 }
1694
1695 return (ENXIO);
1696}
1697
1698static void
1699bge_dma_free(struct bge_softc *sc)
1700{
1701 int i;
1702
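	/*
	 * Tear down in roughly the reverse order of bge_dma_alloc():
	 * per-buffer maps first, then each ring's map, memory and tag,
	 * and finally the parent tag.
	 */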
1703 /* Destroy DMA maps for RX buffers. */
1704 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1705 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1706 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1707 sc->bge_cdata.bge_rx_std_dmamap[i]);
1708 }
1709
1710 /* Destroy DMA maps for jumbo RX buffers. */
1711 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1712 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1713 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1714 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1715 }
1716
1717 /* Destroy DMA maps for TX buffers. */
1718 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1719 if (sc->bge_cdata.bge_tx_dmamap[i])
1720 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1721 sc->bge_cdata.bge_tx_dmamap[i]);
1722 }
1723
1724 if (sc->bge_cdata.bge_mtag)
1725 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1726
1727
1728 /* Destroy standard RX ring. */
1729 if (sc->bge_cdata.bge_rx_std_ring_map)
1730 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1731 sc->bge_cdata.bge_rx_std_ring_map);
1732 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1733 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1734 sc->bge_ldata.bge_rx_std_ring,
1735 sc->bge_cdata.bge_rx_std_ring_map);
1736
1737 if (sc->bge_cdata.bge_rx_std_ring_tag)
1738 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1739
1740 /* Destroy jumbo RX ring. */
1741 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1742 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1743 sc->bge_cdata.bge_rx_jumbo_ring_map);
1744
1745 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1746 sc->bge_ldata.bge_rx_jumbo_ring)
1747 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1748 sc->bge_ldata.bge_rx_jumbo_ring,
1749 sc->bge_cdata.bge_rx_jumbo_ring_map);
1750
1751 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1752 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1753
1754 /* Destroy RX return ring. */
1755 if (sc->bge_cdata.bge_rx_return_ring_map)
1756 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1757 sc->bge_cdata.bge_rx_return_ring_map);
1758
1759 if (sc->bge_cdata.bge_rx_return_ring_map &&
1760 sc->bge_ldata.bge_rx_return_ring)
1761 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1762 sc->bge_ldata.bge_rx_return_ring,
1763 sc->bge_cdata.bge_rx_return_ring_map);
1764
1765 if (sc->bge_cdata.bge_rx_return_ring_tag)
1766 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1767
1768 /* Destroy TX ring. */
1769 if (sc->bge_cdata.bge_tx_ring_map)
1770 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1771 sc->bge_cdata.bge_tx_ring_map);
1772
1773 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1774 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1775 sc->bge_ldata.bge_tx_ring,
1776 sc->bge_cdata.bge_tx_ring_map);
1777
1778 if (sc->bge_cdata.bge_tx_ring_tag)
1779 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1780
1781 /* Destroy status block. */
1782 if (sc->bge_cdata.bge_status_map)
1783 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1784 sc->bge_cdata.bge_status_map);
1785
1786 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1787 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1788 sc->bge_ldata.bge_status_block,
1789 sc->bge_cdata.bge_status_map);
1790
1791 if (sc->bge_cdata.bge_status_tag)
1792 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1793
1794 /* Destroy statistics block. */
1795 if (sc->bge_cdata.bge_stats_map)
1796 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1797 sc->bge_cdata.bge_stats_map);
1798
1799 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1800 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1801 sc->bge_ldata.bge_stats,
1802 sc->bge_cdata.bge_stats_map);
1803
1804 if (sc->bge_cdata.bge_stats_tag)
1805 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1806
1807 /* Destroy the parent tag. */
1808 if (sc->bge_cdata.bge_parent_tag)
1809 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1810}
1811
1812static int
1813bge_dma_alloc(device_t dev)
1814{
1815 struct bge_dmamap_arg ctx;
1816 struct bge_softc *sc;
1817 int i, error;
1818
1819 sc = device_get_softc(dev);
1820
1821 /*
1822 * Allocate the parent bus DMA tag appropriate for PCI.
1823 */
1824 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),/* parent */
1825 1, 0, /* alignment, boundary */
1826 BUS_SPACE_MAXADDR, /* lowaddr */
1827 BUS_SPACE_MAXADDR, /* highaddr */
1828 NULL, NULL, /* filter, filterarg */
1829 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
1830 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1831 0, /* flags */
1832 NULL, NULL, /* lockfunc, lockarg */
1833 &sc->bge_cdata.bge_parent_tag);
1834
1835 if (error != 0) {
1836 device_printf(sc->bge_dev,
1837 "could not allocate parent dma tag\n");
1838 return (ENOMEM);
1839 }
1840
1841 /*
1842 * Create tag for RX mbufs.
1843 */
1844 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1845 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1846 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1847 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1848
1849 if (error) {
1850 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1851 return (ENOMEM);
1852 }
1853
1854 /* Create DMA maps for RX buffers. */
1855 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1856 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1857 &sc->bge_cdata.bge_rx_std_dmamap[i]);
1858 if (error) {
1859 device_printf(sc->bge_dev,
1860 "can't create DMA map for RX\n");
1861 return (ENOMEM);
1862 }
1863 }
1864
1865 /* Create DMA maps for TX buffers. */
1866 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1867 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1868 &sc->bge_cdata.bge_tx_dmamap[i]);
1869 if (error) {
1870 device_printf(sc->bge_dev,
1871 "can't create DMA map for TX\n");
1872 return (ENOMEM);
1873 }
1874 }
1875
1876 /* Create tag for standard RX ring. */
1877 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1878 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1879 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1880 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1881
1882 if (error) {
1883 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1884 return (ENOMEM);
1885 }
1886
1887 /* Allocate DMA'able memory for standard RX ring. */
1888 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1889 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1890 &sc->bge_cdata.bge_rx_std_ring_map);
1891 if (error)
1892 return (ENOMEM);
1893
1894 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1895
1896 /* Load the address of the standard RX ring. */
1897 ctx.bge_maxsegs = 1;
1898 ctx.sc = sc;
1899
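	/* bge_dma_map_addr() records the ring's bus address in ctx.bge_busaddr. */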
1900 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1901 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1902 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1903
1904 if (error)
1905 return (ENOMEM);
1906
1907 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1908
1909 /* Create tags for jumbo mbufs. */
1910 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1911 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1912 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1913 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1914 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1915 if (error) {
1916 device_printf(sc->bge_dev,
1917 "could not allocate jumbo dma tag\n");
1918 return (ENOMEM);
1919 }
1920
1921 /* Create tag for jumbo RX ring. */
1922 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1923 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1924 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1925 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1926
1927 if (error) {
1928 device_printf(sc->bge_dev,
1929 "could not allocate jumbo ring dma tag\n");
1930 return (ENOMEM);
1931 }
1932
1933 /* Allocate DMA'able memory for jumbo RX ring. */
1934 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1935 (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1936 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1937 &sc->bge_cdata.bge_rx_jumbo_ring_map);
1938 if (error)
1939 return (ENOMEM);
1940
1941 /* Load the address of the jumbo RX ring. */
1942 ctx.bge_maxsegs = 1;
1943 ctx.sc = sc;
1944
1945 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1946 sc->bge_cdata.bge_rx_jumbo_ring_map,
1947 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1948 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1949
1950 if (error)
1951 return (ENOMEM);
1952
1953 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1954
1955 /* Create DMA maps for jumbo RX buffers. */
1956 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1957 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1958 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1959 if (error) {
1960 device_printf(sc->bge_dev,
1961 "can't create DMA map for jumbo RX\n");
1962 return (ENOMEM);
1963 }
1964 }
1965
1966 }
1967
1968 /* Create tag for RX return ring. */
1969 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1970 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1971 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1972 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1973
1974 if (error) {
1975 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1976 return (ENOMEM);
1977 }
1978
1979 /* Allocate DMA'able memory for RX return ring. */
1980 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1981 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1982 &sc->bge_cdata.bge_rx_return_ring_map);
1983 if (error)
1984 return (ENOMEM);
1985
1986 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1987 BGE_RX_RTN_RING_SZ(sc));
1988
1989 /* Load the address of the RX return ring. */
1990 ctx.bge_maxsegs = 1;
1991 ctx.sc = sc;
1992
1993 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1994 sc->bge_cdata.bge_rx_return_ring_map,
1995 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1996 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1997
1998 if (error)
1999 return (ENOMEM);
2000
2001 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2002
2003 /* Create tag for TX ring. */
2004 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2005 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2006 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2007 &sc->bge_cdata.bge_tx_ring_tag);
2008
2009 if (error) {
2010 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2011 return (ENOMEM);
2012 }
2013
2014 /* Allocate DMA'able memory for TX ring. */
2015 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2016 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2017 &sc->bge_cdata.bge_tx_ring_map);
2018 if (error)
2019 return (ENOMEM);
2020
2021 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2022
2023 /* Load the address of the TX ring. */
2024 ctx.bge_maxsegs = 1;
2025 ctx.sc = sc;
2026
2027 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2028 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2029 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2030
2031 if (error)
2032 return (ENOMEM);
2033
2034 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2035
2036 /* Create tag for status block. */
2037 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2038 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2039 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2040 NULL, NULL, &sc->bge_cdata.bge_status_tag);
2041
2042 if (error) {
2043 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2044 return (ENOMEM);
2045 }
2046
2047 /* Allocate DMA'able memory for status block. */
2048 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2049 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2050 &sc->bge_cdata.bge_status_map);
2051 if (error)
2052 return (ENOMEM);
2053
2054 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2055
2056 /* Load the address of the status block. */
2057 ctx.sc = sc;
2058 ctx.bge_maxsegs = 1;
2059
2060 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2061 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2062 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2063
2064 if (error)
2065 return (ENOMEM);
2066
2067 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2068
2069 /* Create tag for statistics block. */
2070 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2071 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2072 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2073 &sc->bge_cdata.bge_stats_tag);
2074
2075 if (error) {
2076 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2077 return (ENOMEM);
2078 }
2079
2080 /* Allocate DMA'able memory for statistics block. */
2081 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2082 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2083 &sc->bge_cdata.bge_stats_map);
2084 if (error)
2085 return (ENOMEM);
2086
2087 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2088
2089 /* Load the address of the statistics block. */
2090 ctx.sc = sc;
2091 ctx.bge_maxsegs = 1;
2092
2093 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2094 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2095 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2096
2097 if (error)
2098 return (ENOMEM);
2099
2100 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2101
2102 return (0);
2103}
2104
2105static int
2106bge_attach(device_t dev)
2107{
2108 struct ifnet *ifp;
2109 struct bge_softc *sc;
2110 uint32_t hwcfg = 0;
2111 uint32_t mac_tmp = 0;
2112 u_char eaddr[6];
2113 int error = 0, rid;
2114 int trys;
2115
2116 sc = device_get_softc(dev);
2117 sc->bge_dev = dev;
2118
2119 /*
2120 * Map control/status registers.
2121 */
2122 pci_enable_busmaster(dev);
2123
2124 rid = BGE_PCI_BAR0;
2125 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2126 RF_ACTIVE|PCI_RF_DENSE);
2127
2128 if (sc->bge_res == NULL) {
2129 device_printf(sc->bge_dev, "couldn't map memory\n");
2130 error = ENXIO;
2131 goto fail;
2132 }
2133
2134 sc->bge_btag = rman_get_bustag(sc->bge_res);
2135 sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2136
2137 /* Allocate interrupt. */
2138 rid = 0;
2139
2140 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2141 RF_SHAREABLE | RF_ACTIVE);
2142
2143 if (sc->bge_irq == NULL) {
2144 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2145 error = ENXIO;
2146 goto fail;
2147 }
2148
2149 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2150
2151 /* Save ASIC rev. */
2152
2153 sc->bge_chipid =
2154 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2155 BGE_PCIMISCCTL_ASICREV;
2156 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2157 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2158
2159 /*
2160 * XXX: Broadcom Linux driver. Not in specs or errata.
2161 * PCI-Express?
2162 */
2163 if (BGE_IS_5705_OR_BEYOND(sc)) {
2164 uint32_t v;
2165
2166 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2167 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2168 v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2169 if ((v & 0xff) == BGE_PCIE_CAPID)
2170 sc->bge_flags |= BGE_FLAG_PCIE;
2171 }
2172 }
2173
2174 /*
2175 * PCI-X? The PCISTATE bus-mode bit reads clear on a PCI-X bus.
2176 */
2177 if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
2178 BGE_PCISTATE_PCI_BUSMODE) == 0)
2179 sc->bge_flags |= BGE_FLAG_PCIX;
2180
2181 /* Try to reset the chip. */
2182 if (bge_reset(sc)) {
2183 device_printf(sc->bge_dev, "chip reset failed\n");
2184 bge_release_resources(sc);
2185 error = ENXIO;
2186 goto fail;
2187 }
2188
2189 sc->bge_asf_mode = 0;
2190 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2191 == BGE_MAGIC_NUMBER)) {
2192 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2193 & BGE_HWCFG_ASF) {
2194 sc->bge_asf_mode |= ASF_ENABLE;
2195 sc->bge_asf_mode |= ASF_STACKUP;
2196 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2197 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2198 }
2199 }
2200 }
2201
2202 /* Try to reset the chip again the nice way. */
2203 bge_stop_fw(sc);
2204 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2205 if (bge_reset(sc)) {
2206 device_printf(sc->bge_dev, "chip reset failed\n");
2207 bge_release_resources(sc);
2208 error = ENXIO;
2209 goto fail;
2210 }
2211
2212 bge_sig_legacy(sc, BGE_RESET_STOP);
2213 bge_sig_post_reset(sc, BGE_RESET_STOP);
2214
2215 if (bge_chipinit(sc)) {
2216 device_printf(sc->bge_dev, "chip initialization failed\n");
2217 bge_release_resources(sc);
2218 error = ENXIO;
2219 goto fail;
2220 }
2221
2222 /*
2223 * Get the station address from NIC memory, else from the EEPROM.
2224 */
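	/* The 0x484b value appears to be a signature marking a valid address block. */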
2225 mac_tmp = bge_readmem_ind(sc, 0x0c14);
2226 if ((mac_tmp >> 16) == 0x484b) {
2227 eaddr[0] = (u_char)(mac_tmp >> 8);
2228 eaddr[1] = (u_char)mac_tmp;
2229 mac_tmp = bge_readmem_ind(sc, 0x0c18);
2230 eaddr[2] = (u_char)(mac_tmp >> 24);
2231 eaddr[3] = (u_char)(mac_tmp >> 16);
2232 eaddr[4] = (u_char)(mac_tmp >> 8);
2233 eaddr[5] = (u_char)mac_tmp;
2234 } else if (bge_read_eeprom(sc, eaddr,
2235 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2236 device_printf(sc->bge_dev, "failed to read station address\n");
2237 bge_release_resources(sc);
2238 error = ENXIO;
2239 goto fail;
2240 }
2241
2242 /* 5705 limits RX return ring to 512 entries. */
2243 if (BGE_IS_5705_OR_BEYOND(sc))
2244 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2245 else
2246 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2247
2248 if (bge_dma_alloc(dev)) {
2249 device_printf(sc->bge_dev,
2250 "failed to allocate DMA resources\n");
2251 bge_release_resources(sc);
2252 error = ENXIO;
2253 goto fail;
2254 }
2255
2256 /* Set default tuneable values. */
2257 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2258 sc->bge_rx_coal_ticks = 150;
2259 sc->bge_tx_coal_ticks = 150;
2260 sc->bge_rx_max_coal_bds = 64;
2261 sc->bge_tx_max_coal_bds = 128;
2262
2263 /* Set up ifnet structure */
2264 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2265 if (ifp == NULL) {
2266 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2267 bge_release_resources(sc);
2268 error = ENXIO;
2269 goto fail;
2270 }
2271 ifp->if_softc = sc;
2272 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2273 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2274 ifp->if_ioctl = bge_ioctl;
2275 ifp->if_start = bge_start;
2276 ifp->if_init = bge_init;
2277 ifp->if_mtu = ETHERMTU;
2278 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2279 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2280 IFQ_SET_READY(&ifp->if_snd);
2281 ifp->if_hwassist = BGE_CSUM_FEATURES;
2282 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2283 IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
2284 ifp->if_capenable = ifp->if_capabilities;
2285#ifdef DEVICE_POLLING
2286 ifp->if_capabilities |= IFCAP_POLLING;
2287#endif
2288
2289 /*
2290 * 5700 B0 chips do not support checksumming correctly due
2291 * to hardware bugs.
2292 */
2293 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2294 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2295 ifp->if_capenable &= ~IFCAP_HWCSUM;
2296 ifp->if_hwassist = 0;
2297 }
2298
2299 /*
2300 * Figure out what sort of media we have by checking the
2301 * hardware config word in the first 32k of NIC internal memory,
2302 * or fall back to examining the EEPROM if necessary.
2303 * Note: on some BCM5700 cards, this value appears to be unset.
2304 * If that's the case, we have to rely on identifying the NIC
2305 * by its PCI subsystem ID, as we do below for the SysKonnect
2306 * SK-9D41.
2307 */
2308 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2309 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2310 else {
2311 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2312 sizeof(hwcfg))) {
2313 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2314 bge_release_resources(sc);
2315 error = ENXIO;
2316 goto fail;
2317 }
2318 hwcfg = ntohl(hwcfg);
2319 }
2320
2321 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2322 sc->bge_flags |= BGE_FLAG_TBI;
2323
2324 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2325 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2326 sc->bge_flags |= BGE_FLAG_TBI;
2327
2328 if (sc->bge_flags & BGE_FLAG_TBI) {
2329 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2330 bge_ifmedia_upd, bge_ifmedia_sts);
2331 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2332 ifmedia_add(&sc->bge_ifmedia,
2333 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2334 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2335 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2336 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2337 } else {
2338 /*
2339 * Do transceiver setup and tell the firmware the
2340 * driver is down so we can try to access the PHY during
2341 * the probe if ASF is running. Retry a couple of times
2342 * if we get a conflict with the ASF firmware accessing
2343 * the PHY.
2344 */
2345 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2346 trys = 0;
2347again:
2348 bge_asf_driver_up(sc);
2349
2350 if (mii_phy_probe(dev, &sc->bge_miibus,
2351 bge_ifmedia_upd, bge_ifmedia_sts)) {
2352 if (trys++ < 4) {
2353 device_printf(sc->bge_dev, "Try again\n");
2354 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR, BMCR_RESET);
2355 goto again;
2356 }
2357
2358 device_printf(sc->bge_dev, "MII without any PHY!\n");
2359 bge_release_resources(sc);
2360 error = ENXIO;
2361 goto fail;
2362 }
2363
2364 /*
2365 * Now tell the firmware we are going up after probing the PHY
2366 */
2367 if (sc->bge_asf_mode & ASF_STACKUP)
2368 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2369 }
2370
2371 /*
2372 * When using the BCM5701 in PCI-X mode, data corruption has
2373 * been observed in the first few bytes of some received packets.
2374 * Aligning the packet buffer in memory eliminates the corruption.
2375 * Unfortunately, this misaligns the packet payloads. On platforms
2376 * which do not support unaligned accesses, we will realign the
2377 * payloads by copying the received packets.
2378 */
2379 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2380 sc->bge_flags & BGE_FLAG_PCIX)
2381 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2382
2383 /*
2384 * Call MI attach routine.
2385 */
2386 ether_ifattach(ifp, eaddr);
2387 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2388
2389 /*
2390 * Hookup IRQ last.
2391 */
2392 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2393 bge_intr, sc, &sc->bge_intrhand);
2394
2395 if (error) {
2396 bge_detach(dev);
2397 device_printf(sc->bge_dev, "couldn't set up irq\n");
2398 }
2399
2400fail:
2401 return (error);
2402}
2403
2404static int
2405bge_detach(device_t dev)
2406{
2407 struct bge_softc *sc;
2408 struct ifnet *ifp;
2409
2410 sc = device_get_softc(dev);
2411 ifp = sc->bge_ifp;
2412
2413#ifdef DEVICE_POLLING
2414 if (ifp->if_capenable & IFCAP_POLLING)
2415 ether_poll_deregister(ifp);
2416#endif
2417
2418 BGE_LOCK(sc);
2419 bge_stop(sc);
2420 bge_reset(sc);
2421 BGE_UNLOCK(sc);
2422
2423 ether_ifdetach(ifp);
2424
2425 if (sc->bge_flags & BGE_FLAG_TBI) {
2426 ifmedia_removeall(&sc->bge_ifmedia);
2427 } else {
2428 bus_generic_detach(dev);
2429 device_delete_child(dev, sc->bge_miibus);
2430 }
2431
2432 bge_release_resources(sc);
2433
2434 return (0);
2435}
2436
2437static void
2438bge_release_resources(struct bge_softc *sc)
2439{
2440 device_t dev;
2441
2442 dev = sc->bge_dev;
2443
2444 if (sc->bge_vpd_prodname != NULL)
2445 free(sc->bge_vpd_prodname, M_DEVBUF);
2446
2447 if (sc->bge_vpd_readonly != NULL)
2448 free(sc->bge_vpd_readonly, M_DEVBUF);
2449
2450 if (sc->bge_intrhand != NULL)
2451 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2452
2453 if (sc->bge_irq != NULL)
2454 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2455
2456 if (sc->bge_res != NULL)
2457 bus_release_resource(dev, SYS_RES_MEMORY,
2458 BGE_PCI_BAR0, sc->bge_res);
2459
2460 if (sc->bge_ifp != NULL)
2461 if_free(sc->bge_ifp);
2462
2463 bge_dma_free(sc);
2464
2465 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
2466 BGE_LOCK_DESTROY(sc);
2467}
2468
2469static int
2470bge_reset(struct bge_softc *sc)
2471{
2472 device_t dev;
2473 uint32_t cachesize, command, pcistate, reset;
2474 int i, val = 0;
2475
2476 dev = sc->bge_dev;
2477
2478 /* Save some important PCI state. */
2479 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2480 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2481 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2482
2483 pci_write_config(dev, BGE_PCI_MISC_CTL,
2484 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2485 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2486
2487 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2488
2489 /* XXX: Broadcom Linux driver. */
2490 if (sc->bge_flags & BGE_FLAG_PCIE) {
2491 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */
2492 CSR_WRITE_4(sc, 0x7e2c, 0x20);
2493 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2494 /* Prevent PCIE link training during global reset */
2495 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2496 reset |= (1<<29);
2497 }
2498 }
2499
2500 /*
2501 * Write the magic number to the firmware mailbox at 0xb50
2502 * so that the driver can synchronize with the firmware.
2503 */
2504 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2505
2506 /* Issue global reset */
2507 bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2508
2509 DELAY(1000);
2510
2511 /* XXX: Broadcom Linux driver. */
2512 if (sc->bge_flags & BGE_FLAG_PCIE) {
2513 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2514 uint32_t v;
2515
2516 DELAY(500000); /* wait for link training to complete */
2517 v = pci_read_config(dev, 0xc4, 4);
2518 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2519 }
2520 /* Set PCIE max payload size and clear error status. */
2521 pci_write_config(dev, 0xd8, 0xf5000, 4);
2522 }
2523
2524 /* Reset some of the PCI state that got zapped by reset. */
2525 pci_write_config(dev, BGE_PCI_MISC_CTL,
2526 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2527 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2528 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2529 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2530 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2531
2532 /* Enable memory arbiter. */
2533 if (BGE_IS_5714_FAMILY(sc)) {
2534 uint32_t val;
2535
2536 val = CSR_READ_4(sc, BGE_MARB_MODE);
2537 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2538 } else
2539 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2540
2541 /*
2542 * Poll the value location we just wrote until
2543 * we see the 1's complement of the magic number.
2544 * This indicates that the firmware initialization
2545 * is complete.
2546 */
2547 for (i = 0; i < BGE_TIMEOUT; i++) {
2548 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2549 if (val == ~BGE_MAGIC_NUMBER)
2550 break;
2551 DELAY(10);
2552 }
2553
2554 if (i == BGE_TIMEOUT) {
2555 device_printf(sc->bge_dev, "firmware handshake timed out\n");
2556 return (0);
2557 }
2558
2559 /*
2560 * XXX Wait for the value of the PCISTATE register to
2561 * return to its original pre-reset state. This is a
2562 * fairly good indicator of reset completion. If we don't
2563 * wait for the reset to fully complete, trying to read
2564 * from the device's non-PCI registers may yield garbage
2565 * results.
2566 */
2567 for (i = 0; i < BGE_TIMEOUT; i++) {
2568 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2569 break;
2570 DELAY(10);
2571 }
2572
2573 /* Fix up byte swapping. */
2574 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2575 BGE_MODECTL_BYTESWAP_DATA);
2576
2577 /* Tell the ASF firmware we are up */
2578 if (sc->bge_asf_mode & ASF_STACKUP)
2579 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2580
2581 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2582
2583 /*
2584 * The 5704 in TBI mode apparently needs some special
2585 * adjustment to ensure the SERDES drive level is set
2586 * to 1.2V.
2587 */
2588 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
2589 sc->bge_flags & BGE_FLAG_TBI) {
2590 uint32_t serdescfg;
2591
2592 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2593 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2594 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2595 }
2596
2597 /* XXX: Broadcom Linux driver. */
2598 if (sc->bge_flags & BGE_FLAG_PCIE &&
2599 sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2600 uint32_t v;
2601
2602 v = CSR_READ_4(sc, 0x7c00);
2603 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2604 }
2605 DELAY(10000);
2606
2607 return (0);
2608}
2609
2610/*
2611 * Frame reception handling. This is called if there's a frame
2612 * on the receive return list.
2613 *
2614 * Note: we have to be able to handle two possibilities here:
2615 * 1) the frame is from the jumbo receive ring
2616 * 2) the frame is from the standard receive ring
2617 */
2618
2619static void
2620bge_rxeof(struct bge_softc *sc)
2621{
2622 struct ifnet *ifp;
2623 int stdcnt = 0, jumbocnt = 0;
2624
2625 BGE_LOCK_ASSERT(sc);
2626
2627 /* Nothing to do. */
2628 if (sc->bge_rx_saved_considx ==
2629 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2630 return;
2631
2632 ifp = sc->bge_ifp;
2633
2634 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2635 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2636 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2637 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2638 if (BGE_IS_JUMBO_CAPABLE(sc))
2639 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2640 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);
2641
2642 while (sc->bge_rx_saved_considx !=
2643 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2644 struct bge_rx_bd *cur_rx;
2645 uint32_t rxidx;
2646 struct mbuf *m = NULL;
2647 uint16_t vlan_tag = 0;
2648 int have_tag = 0;
2649
2650#ifdef DEVICE_POLLING
2651 if (ifp->if_capenable & IFCAP_POLLING) {
2652 if (sc->rxcycles <= 0)
2653 break;
2654 sc->rxcycles--;
2655 }
2656#endif
2657
2658 cur_rx =
2659 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2660
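		/*
		 * bge_idx points back at the slot in the standard or jumbo
		 * RX ring that this return descriptor consumed.
		 */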
2661 rxidx = cur_rx->bge_idx;
2662 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2663
2664 if (!(ifp->if_flags & IFF_PROMISC) &&
2665 (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG)) {
2666 have_tag = 1;
2667 vlan_tag = cur_rx->bge_vlan_tag;
2668 }
2669
2670 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2671 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2672 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2673 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2674 BUS_DMASYNC_POSTREAD);
2675 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2676 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2677 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2678 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2679 jumbocnt++;
2680 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2681 ifp->if_ierrors++;
2682 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2683 continue;
2684 }
2685 if (bge_newbuf_jumbo(sc,
2686 sc->bge_jumbo, NULL) == ENOBUFS) {
2687 ifp->if_ierrors++;
2688 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2689 continue;
2690 }
2691 } else {
2692 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2693 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2694 sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2695 BUS_DMASYNC_POSTREAD);
2696 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2697 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2698 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2699 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2700 stdcnt++;
2701 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2702 ifp->if_ierrors++;
2703 bge_newbuf_std(sc, sc->bge_std, m);
2704 continue;
2705 }
2706 if (bge_newbuf_std(sc, sc->bge_std,
2707 NULL) == ENOBUFS) {
2708 ifp->if_ierrors++;
2709 bge_newbuf_std(sc, sc->bge_std, m);
2710 continue;
2711 }
2712 }
2713
2714 ifp->if_ipackets++;
2715#ifndef __NO_STRICT_ALIGNMENT
2716 /*
2717 * For architectures with strict alignment we must make sure
2718 * the payload is aligned.
2719 */
2720 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
2721 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2722 cur_rx->bge_len);
2723 m->m_data += ETHER_ALIGN;
2724 }
2725#endif
2726 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2727 m->m_pkthdr.rcvif = ifp;
2728
2729 if (ifp->if_capenable & IFCAP_RXCSUM) {
2730 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2731 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2732 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2733 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2734 }
2735 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2736 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2737 m->m_pkthdr.csum_data =
2738 cur_rx->bge_tcp_udp_csum;
2739 m->m_pkthdr.csum_flags |=
2740 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2741 }
2742 }
2743
2744 /*
2745 * If we received a packet with a vlan tag,
2746 * attach that information to the packet.
2747 */
2748 if (have_tag) {
2749 m->m_pkthdr.ether_vtag = vlan_tag;
2750 m->m_flags |= M_VLANTAG;
2751 }
2752
2753 BGE_UNLOCK(sc);
2754 (*ifp->if_input)(ifp, m);
2755 BGE_LOCK(sc);
2756 }
2757
2758 if (stdcnt > 0)
2759 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2760 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2761
2762 if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
2763 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2764 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
2765
2766 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2767 if (stdcnt)
2768 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2769 if (jumbocnt)
2770 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2771}
2772
2773static void
2774bge_txeof(struct bge_softc *sc)
2775{
2776 struct bge_tx_bd *cur_tx = NULL;
2777 struct ifnet *ifp;
2778
2779 BGE_LOCK_ASSERT(sc);
2780
2781 /* Nothing to do. */
2782 if (sc->bge_tx_saved_considx ==
2783 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2784 return;
2785
2786 ifp = sc->bge_ifp;
2787
2788 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2789 sc->bge_cdata.bge_tx_ring_map,
2790 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2791 /*
2792 * Go through our tx ring and free mbufs for those
2793 * frames that have been sent.
2794 */
2795 while (sc->bge_tx_saved_considx !=
2796 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2797 uint32_t idx = 0;
2798
2799 idx = sc->bge_tx_saved_considx;
2800 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2801 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2802 ifp->if_opackets++;
2803 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2804 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2805 sc->bge_cdata.bge_tx_dmamap[idx],
2806 BUS_DMASYNC_POSTWRITE);
2807 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2808 sc->bge_cdata.bge_tx_dmamap[idx]);
2809 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2810 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2811 }
2812 sc->bge_txcnt--;
2813 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
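		/* Frames completed; clear the watchdog timer. */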
2814 sc->bge_timer = 0;
2815 }
2816
2817 if (cur_tx != NULL)
2818 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2819}
2820
2821#ifdef DEVICE_POLLING
2822static void
2823bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2824{
2825 struct bge_softc *sc = ifp->if_softc;
2826 uint32_t statusword;
2827
2828 BGE_LOCK(sc);
2829 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2830 BGE_UNLOCK(sc);
2831 return;
2832 }
2833
2834 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2835 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2836
2837 statusword = atomic_readandclear_32(
2838 &sc->bge_ldata.bge_status_block->bge_status);
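	/*
	 * Read and clear the status word in one operation so that a link
	 * event flagged between polls is not lost.
	 */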
2839
2840 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2841 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2842
2843 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS cmd */
2844 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2845 sc->bge_link_evt++;
2846
2847 if (cmd == POLL_AND_CHECK_STATUS)
2848 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2849 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
2850 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
2851 bge_link_upd(sc);
2852
2853 sc->rxcycles = count;
2854 bge_rxeof(sc);
2855 bge_txeof(sc);
2856 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2857 bge_start_locked(ifp);
2858
2859 BGE_UNLOCK(sc);
2860}
2861#endif /* DEVICE_POLLING */
2862
2863static void
2864bge_intr(void *xsc)
2865{
2866 struct bge_softc *sc;
2867 struct ifnet *ifp;
2868 uint32_t statusword;
2869
2870 sc = xsc;
2871
2872 BGE_LOCK(sc);
2873
2874 ifp = sc->bge_ifp;
2875
2876#ifdef DEVICE_POLLING
2877 if (ifp->if_capenable & IFCAP_POLLING) {
2878 BGE_UNLOCK(sc);
2879 return;
2880 }
2881#endif
2882
2883 /*
2884 * Do the mandatory PCI flush as well as get the link status.
2885 */
2886 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
2887
2888 /* Ack interrupt and stop others from occurring. */
2889 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2890
2891 /* Make sure the descriptor ring indexes are coherent. */
2892 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2893 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2894 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2895 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2896
2897 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2898 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
2899 statusword || sc->bge_link_evt)
2900 bge_link_upd(sc);
2901
2902 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2903 /* Check RX return ring producer/consumer. */
2904 bge_rxeof(sc);
2905
2906 /* Check TX ring producer/consumer. */
2907 bge_txeof(sc);
2908 }
2909
2910 /* Re-enable interrupts. */
2911 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2912
2913 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2914 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2915 bge_start_locked(ifp);
2916
2917 BGE_UNLOCK(sc);
2918}
2919
2920static void
2921bge_asf_driver_up(struct bge_softc *sc)
2922{
2923 if (sc->bge_asf_mode & ASF_STACKUP) {
2924 /* Send ASF heartbeat approx. every 2s */
2925 if (sc->bge_asf_count)
2926 sc->bge_asf_count--;
2927 else {
2928 sc->bge_asf_count = 5;
2929 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
2930 BGE_FW_DRV_ALIVE);
2931 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
2932 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
2933 CSR_WRITE_4(sc, BGE_CPU_EVENT,
2934 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
2935 }
2936 }
2937}
2938
2939static void
2940bge_tick(void *xsc)
2941{
2942 struct bge_softc *sc = xsc;
2943 struct mii_data *mii = NULL;
2944
2945 BGE_LOCK_ASSERT(sc);
2946
2947 if (BGE_IS_5705_OR_BEYOND(sc))
2948 bge_stats_update_regs(sc);
2949 else
2950 bge_stats_update(sc);
2951
2952 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
2953 mii = device_get_softc(sc->bge_miibus);
2954 /* Don't mess with the PHY in IPMI/ASF mode */
2955 if (!((sc->bge_asf_mode & ASF_STACKUP) && (sc->bge_link)))
2956 mii_tick(mii);
2957 } else {
2958 /*
2959 * Since in TBI mode auto-polling can't be used we should poll
2960 * link status manually. Here we register pending link event
2961 * and trigger interrupt.
2962 */
2963#ifdef DEVICE_POLLING
2964 /* In polling mode we poll link state in bge_poll(). */
2965 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
2966#endif
2967 {
2968 sc->bge_link_evt++;
2969 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2970 }
2971 }
2972
2973 bge_asf_driver_up(sc);
2974 bge_watchdog(sc);
2975
2976 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
2977}
2978
2979static void
2980bge_stats_update_regs(struct bge_softc *sc)
2981{
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83
84#include <net/if.h>
85#include <net/if_arp.h>
86#include <net/ethernet.h>
87#include <net/if_dl.h>
88#include <net/if_media.h>
89
90#include <net/bpf.h>
91
92#include <net/if_types.h>
93#include <net/if_vlan_var.h>
94
95#include <netinet/in_systm.h>
96#include <netinet/in.h>
97#include <netinet/ip.h>
98
99#include <machine/bus.h>
100#include <machine/resource.h>
101#include <sys/bus.h>
102#include <sys/rman.h>
103
104#include <dev/mii/mii.h>
105#include <dev/mii/miivar.h>
106#include "miidevs.h"
107#include <dev/mii/brgphyreg.h>
108
109#include <dev/pci/pcireg.h>
110#include <dev/pci/pcivar.h>
111
112#include <dev/bge/if_bgereg.h>
113
114#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
115#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
116
117MODULE_DEPEND(bge, pci, 1, 1, 1);
118MODULE_DEPEND(bge, ether, 1, 1, 1);
119MODULE_DEPEND(bge, miibus, 1, 1, 1);
120
121/* "device miibus" required. See GENERIC if you get errors here. */
122#include "miibus_if.h"
123
124/*
125 * Various supported device vendors/types and their names. Note: the
126 * spec seems to indicate that the hardware still has Alteon's vendor
127 * ID burned into it, though it will always be overridden by the vendor
128 * ID in the EEPROM. Just to be safe, we cover all possibilities.
129 */
130static struct bge_type {
131 uint16_t bge_vid;
132 uint16_t bge_did;
133} bge_devs[] = {
134 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
135 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
136
137 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
138 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
139 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
140
141 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
142
143 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
144 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
145 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
146 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
147 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
148 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
149 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
150 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
191
192 { SK_VENDORID, SK_DEVICEID_ALTIMA },
193
194 { TC_VENDORID, TC_DEVICEID_3C996 },
195
196 { 0, 0 }
197};
198
199static const struct bge_vendor {
200 uint16_t v_id;
201 const char *v_name;
202} bge_vendors[] = {
203 { ALTEON_VENDORID, "Alteon" },
204 { ALTIMA_VENDORID, "Altima" },
205 { APPLE_VENDORID, "Apple" },
206 { BCOM_VENDORID, "Broadcom" },
207 { SK_VENDORID, "SysKonnect" },
208 { TC_VENDORID, "3Com" },
209
210 { 0, NULL }
211};
212
213static const struct bge_revision {
214 uint32_t br_chipid;
215 const char *br_name;
216} bge_revisions[] = {
217 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
218 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
219 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
220 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
221 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
222 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
223 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
224 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
225 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
226 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
227 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
228 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
229 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
230 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
231 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
232 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
233 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
234 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
235 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
236 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
237 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
238 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
239 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
240 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
241 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
242 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
243 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
244 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
245 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
246 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
247 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
248 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
249 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
250 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
251 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
252 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
253 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
254 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
255 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
256 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
257 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
258 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
259
260 { 0, NULL }
261};
262
263/*
264 * Some defaults for major revisions, so that newer steppings
265 * that we don't know about have a shot at working.
266 */
267static const struct bge_revision bge_majorrevs[] = {
268 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
269 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
270 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
271 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
272 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
273 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
274 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
275 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
276 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
277 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
278 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
279 { BGE_ASICREV_BCM5787, "unknown BCM5787" },
280
281 { 0, NULL }
282};
283
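/*
 * Convenience macros that classify chips by ASIC revision; the driver
 * uses them to gate feature-specific code paths (register-based
 * statistics, jumbo frames, 5714-family quirks).
 */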
284#define BGE_IS_5705_OR_BEYOND(sc) \
285 ((sc)->bge_asicrev == BGE_ASICREV_BCM5705 || \
286 (sc)->bge_asicrev == BGE_ASICREV_BCM5750 || \
287 (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \
288 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \
289 (sc)->bge_asicrev == BGE_ASICREV_BCM5714 || \
290 (sc)->bge_asicrev == BGE_ASICREV_BCM5752 || \
291 (sc)->bge_asicrev == BGE_ASICREV_BCM5755 || \
292 (sc)->bge_asicrev == BGE_ASICREV_BCM5787)
293
294#define BGE_IS_575X_PLUS(sc) \
295 ((sc)->bge_asicrev == BGE_ASICREV_BCM5750 || \
296 (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \
297 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \
298 (sc)->bge_asicrev == BGE_ASICREV_BCM5714 || \
299 (sc)->bge_asicrev == BGE_ASICREV_BCM5752 || \
300 (sc)->bge_asicrev == BGE_ASICREV_BCM5755 || \
301 (sc)->bge_asicrev == BGE_ASICREV_BCM5787)
302
303#define BGE_IS_5714_FAMILY(sc) \
304 ((sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \
305 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \
306 (sc)->bge_asicrev == BGE_ASICREV_BCM5714)
307
308#define BGE_IS_JUMBO_CAPABLE(sc) \
309 ((sc)->bge_asicrev == BGE_ASICREV_BCM5700 || \
310 (sc)->bge_asicrev == BGE_ASICREV_BCM5701 || \
311 (sc)->bge_asicrev == BGE_ASICREV_BCM5703 || \
312 (sc)->bge_asicrev == BGE_ASICREV_BCM5704)
313
314const struct bge_revision * bge_lookup_rev(uint32_t);
315const struct bge_vendor * bge_lookup_vendor(uint16_t);
316static int bge_probe(device_t);
317static int bge_attach(device_t);
318static int bge_detach(device_t);
319static int bge_suspend(device_t);
320static int bge_resume(device_t);
321static void bge_release_resources(struct bge_softc *);
322static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
323static int bge_dma_alloc(device_t);
324static void bge_dma_free(struct bge_softc *);
325
326static void bge_txeof(struct bge_softc *);
327static void bge_rxeof(struct bge_softc *);
328
329static void bge_asf_driver_up(struct bge_softc *);
330static void bge_tick(void *);
331static void bge_stats_update(struct bge_softc *);
332static void bge_stats_update_regs(struct bge_softc *);
333static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
334
335static void bge_intr(void *);
336static void bge_start_locked(struct ifnet *);
337static void bge_start(struct ifnet *);
338static int bge_ioctl(struct ifnet *, u_long, caddr_t);
339static void bge_init_locked(struct bge_softc *);
340static void bge_init(void *);
341static void bge_stop(struct bge_softc *);
342static void bge_watchdog(struct bge_softc *);
343static void bge_shutdown(device_t);
344static int bge_ifmedia_upd_locked(struct ifnet *);
345static int bge_ifmedia_upd(struct ifnet *);
346static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
347
348static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
349static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
350
351static void bge_setpromisc(struct bge_softc *);
352static void bge_setmulti(struct bge_softc *);
353
354static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
355static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
356static int bge_init_rx_ring_std(struct bge_softc *);
357static void bge_free_rx_ring_std(struct bge_softc *);
358static int bge_init_rx_ring_jumbo(struct bge_softc *);
359static void bge_free_rx_ring_jumbo(struct bge_softc *);
360static void bge_free_tx_ring(struct bge_softc *);
361static int bge_init_tx_ring(struct bge_softc *);
362
363static int bge_chipinit(struct bge_softc *);
364static int bge_blockinit(struct bge_softc *);
365
366static uint32_t bge_readmem_ind(struct bge_softc *, int);
367static void bge_writemem_ind(struct bge_softc *, int, int);
368#ifdef notdef
369static uint32_t bge_readreg_ind(struct bge_softc *, int);
370#endif
371static void bge_writereg_ind(struct bge_softc *, int, int);
372
373static int bge_miibus_readreg(device_t, int, int);
374static int bge_miibus_writereg(device_t, int, int, int);
375static void bge_miibus_statchg(device_t);
376#ifdef DEVICE_POLLING
377static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
378#endif
379
380#define BGE_RESET_START 1
381#define BGE_RESET_STOP 2
382static void bge_sig_post_reset(struct bge_softc *, int);
383static void bge_sig_legacy(struct bge_softc *, int);
384static void bge_sig_pre_reset(struct bge_softc *, int);
385static int bge_reset(struct bge_softc *);
386static void bge_link_upd(struct bge_softc *);
387
388static device_method_t bge_methods[] = {
389 /* Device interface */
390 DEVMETHOD(device_probe, bge_probe),
391 DEVMETHOD(device_attach, bge_attach),
392 DEVMETHOD(device_detach, bge_detach),
393 DEVMETHOD(device_shutdown, bge_shutdown),
394 DEVMETHOD(device_suspend, bge_suspend),
395 DEVMETHOD(device_resume, bge_resume),
396
397 /* bus interface */
398 DEVMETHOD(bus_print_child, bus_generic_print_child),
399 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
400
401 /* MII interface */
402 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
403 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
404 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
405
406 { 0, 0 }
407};
408
409static driver_t bge_driver = {
410 "bge",
411 bge_methods,
412 sizeof(struct bge_softc)
413};
414
415static devclass_t bge_devclass;
416
417DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
418DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
419
420static int bge_fake_autoneg = 0;
421static int bge_allow_asf = 1;
422
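/* Both knobs are boot-time tunables, e.g. hw.bge.allow_asf="0" in loader.conf. */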
423TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
424TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
425
426SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
427SYSCTL_INT(_hw_bge, OID_AUTO, fake_autoneg, CTLFLAG_RD, &bge_fake_autoneg, 0,
428 "Enable fake autonegotiation for certain blade systems");
429SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
430 "Allow ASF mode if available");
431
432static uint32_t
433bge_readmem_ind(struct bge_softc *sc, int off)
434{
435 device_t dev;
436
437 dev = sc->bge_dev;
438
439 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
440 return (pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
441}
442
443static void
444bge_writemem_ind(struct bge_softc *sc, int off, int val)
445{
446 device_t dev;
447
448 dev = sc->bge_dev;
449
450 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
451 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
452}
453
454#ifdef notdef
455static uint32_t
456bge_readreg_ind(struct bge_softc *sc, int off)
457{
458 device_t dev;
459
460 dev = sc->bge_dev;
461
462 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
463 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
464}
465#endif
466
467static void
468bge_writereg_ind(struct bge_softc *sc, int off, int val)
469{
470 device_t dev;
471
472 dev = sc->bge_dev;
473
474 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
475 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
476}
477
478/*
479 * Map a single buffer address.
480 */
481
482static void
483bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
484{
485 struct bge_dmamap_arg *ctx;
486
487 if (error)
488 return;
489
490 ctx = arg;
491
492 if (nseg > ctx->bge_maxsegs) {
493 ctx->bge_maxsegs = 0;
494 return;
495 }
496
497 ctx->bge_busaddr = segs->ds_addr;
498}
499
500/*
501 * Read a byte of data stored in the EEPROM at address 'addr.' The
502 * BCM570x supports both the traditional bitbang interface and an
503 * auto access interface for reading the EEPROM. We use the auto
504 * access method.
505 */
506static uint8_t
507bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
508{
509 int i;
510 uint32_t byte = 0;
511
512 /*
513 * Enable use of auto EEPROM access so we can avoid
514 * having to use the bitbang method.
515 */
516 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
517
518 /* Reset the EEPROM, load the clock period. */
519 CSR_WRITE_4(sc, BGE_EE_ADDR,
520 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
521 DELAY(20);
522
523 /* Issue the read EEPROM command. */
524 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
525
526 /* Wait for completion */
527 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
528 DELAY(10);
529 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
530 break;
531 }
532
 533	if (i == BGE_TIMEOUT * 10) {
534 device_printf(sc->bge_dev, "EEPROM read timed out\n");
535 return (1);
536 }
537
538 /* Get result. */
539 byte = CSR_READ_4(sc, BGE_EE_DATA);
540
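	/*
	 * The EEPROM data register returns a full 32-bit word; pick out
	 * the byte lane that corresponds to the requested address.
	 */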
541 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
542
543 return (0);
544}
545
546/*
547 * Read a sequence of bytes from the EEPROM.
548 */
549static int
550bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
551{
552 int i, error = 0;
553 uint8_t byte = 0;
554
555 for (i = 0; i < cnt; i++) {
556 error = bge_eeprom_getbyte(sc, off + i, &byte);
557 if (error)
558 break;
559 *(dest + i) = byte;
560 }
561
562 return (error ? 1 : 0);
563}
564
565static int
566bge_miibus_readreg(device_t dev, int phy, int reg)
567{
568 struct bge_softc *sc;
569 uint32_t val, autopoll;
570 int i;
571
572 sc = device_get_softc(dev);
573
574 /*
575 * Broadcom's own driver always assumes the internal
576 * PHY is at GMII address 1. On some chips, the PHY responds
577 * to accesses at all addresses, which could cause us to
 578	 * bogusly attach the PHY 32 times at probe time. Always
 579	 * restricting the lookup to address 1 is simpler than
 580	 * trying to figure out which chip revisions should be
581 * special-cased.
582 */
583 if (phy != 1)
584 return (0);
585
586 /* Reading with autopolling on may trigger PCI errors */
587 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
588 if (autopoll & BGE_MIMODE_AUTOPOLL) {
589 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
590 DELAY(40);
591 }
592
593 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
594 BGE_MIPHY(phy)|BGE_MIREG(reg));
595
596 for (i = 0; i < BGE_TIMEOUT; i++) {
597 val = CSR_READ_4(sc, BGE_MI_COMM);
598 if (!(val & BGE_MICOMM_BUSY))
599 break;
600 }
601
602 if (i == BGE_TIMEOUT) {
603 device_printf(sc->bge_dev, "PHY read timed out\n");
604 val = 0;
605 goto done;
606 }
607
608 val = CSR_READ_4(sc, BGE_MI_COMM);
609
610done:
611 if (autopoll & BGE_MIMODE_AUTOPOLL) {
612 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
613 DELAY(40);
614 }
615
616 if (val & BGE_MICOMM_READFAIL)
617 return (0);
618
619 return (val & 0xFFFF);
620}
621
622static int
623bge_miibus_writereg(device_t dev, int phy, int reg, int val)
624{
625 struct bge_softc *sc;
626 uint32_t autopoll;
627 int i;
628
629 sc = device_get_softc(dev);
630
631 /* Reading with autopolling on may trigger PCI errors */
632 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
633 if (autopoll & BGE_MIMODE_AUTOPOLL) {
634 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
635 DELAY(40);
636 }
637
638 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
639 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
640
641 for (i = 0; i < BGE_TIMEOUT; i++) {
642 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
643 break;
644 }
645
646 if (autopoll & BGE_MIMODE_AUTOPOLL) {
647 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
648 DELAY(40);
649 }
650
651 if (i == BGE_TIMEOUT) {
 652		device_printf(sc->bge_dev, "PHY write timed out\n");
653 return (0);
654 }
655
656 return (0);
657}
658
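/*
 * MII status change callback: keep the MAC's port mode (GMII vs. MII)
 * and duplex setting in step with what the PHY negotiated.
 */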
659static void
660bge_miibus_statchg(device_t dev)
661{
662 struct bge_softc *sc;
663 struct mii_data *mii;
664 sc = device_get_softc(dev);
665 mii = device_get_softc(sc->bge_miibus);
666
667 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
668 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
669 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
670 else
671 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
672
673 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
674 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
675 else
676 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
677}
678
679/*
 680 * Initialize a standard receive ring descriptor.
681 */
682static int
683bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
684{
685 struct mbuf *m_new = NULL;
686 struct bge_rx_bd *r;
687 struct bge_dmamap_arg ctx;
688 int error;
689
690 if (m == NULL) {
691 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
692 if (m_new == NULL)
693 return (ENOBUFS);
694 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
695 } else {
696 m_new = m;
697 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
698 m_new->m_data = m_new->m_ext.ext_buf;
699 }
700
701 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
702 m_adj(m_new, ETHER_ALIGN);
703 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
704 r = &sc->bge_ldata.bge_rx_std_ring[i];
705 ctx.bge_maxsegs = 1;
706 ctx.sc = sc;
707 error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
708 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
709 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
710 if (error || ctx.bge_maxsegs == 0) {
711 if (m == NULL) {
712 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
713 m_freem(m_new);
714 }
715 return (ENOMEM);
716 }
717 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
718 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
719 r->bge_flags = BGE_RXBDFLAG_END;
720 r->bge_len = m_new->m_len;
721 r->bge_idx = i;
722
723 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
724 sc->bge_cdata.bge_rx_std_dmamap[i],
725 BUS_DMASYNC_PREREAD);
726
727 return (0);
728}
729
730/*
731 * Initialize a jumbo receive ring descriptor. This allocates
732 * a jumbo buffer from the pool managed internally by the driver.
733 */
734static int
735bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
736{
737 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
738 struct bge_extrx_bd *r;
739 struct mbuf *m_new = NULL;
740 int nsegs;
741 int error;
742
743 if (m == NULL) {
744 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
745 if (m_new == NULL)
746 return (ENOBUFS);
747
748 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
749 if (!(m_new->m_flags & M_EXT)) {
750 m_freem(m_new);
751 return (ENOBUFS);
752 }
753 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
754 } else {
755 m_new = m;
756 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
757 m_new->m_data = m_new->m_ext.ext_buf;
758 }
759
760 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
761 m_adj(m_new, ETHER_ALIGN);
762
763 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
764 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
765 m_new, segs, &nsegs, BUS_DMA_NOWAIT);
766 if (error) {
767 if (m == NULL)
768 m_freem(m_new);
769 return (error);
770 }
771 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
772
773 /*
774 * Fill in the extended RX buffer descriptor.
775 */
776 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
777 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
778 r->bge_idx = i;
779 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
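	/*
	 * Note: the cases below fall through intentionally so the
	 * descriptor is filled from the highest-numbered segment down
	 * to segment 0.
	 */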
780 switch (nsegs) {
781 case 4:
782 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
783 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
784 r->bge_len3 = segs[3].ds_len;
785 case 3:
786 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
787 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
788 r->bge_len2 = segs[2].ds_len;
789 case 2:
790 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
791 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
792 r->bge_len1 = segs[1].ds_len;
793 case 1:
794 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
795 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
796 r->bge_len0 = segs[0].ds_len;
797 break;
798 default:
799 panic("%s: %d segments\n", __func__, nsegs);
800 }
801
 802	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
803 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
804 BUS_DMASYNC_PREREAD);
805
806 return (0);
807}
808
809/*
810 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 811 * that's 1MB of memory, which is a lot. For now, we fill only the first
812 * 256 ring entries and hope that our CPU is fast enough to keep up with
813 * the NIC.
814 */
815static int
816bge_init_rx_ring_std(struct bge_softc *sc)
817{
818 int i;
819
820 for (i = 0; i < BGE_SSLOTS; i++) {
821 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
822 return (ENOBUFS);
 823	}
824
825 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
826 sc->bge_cdata.bge_rx_std_ring_map,
827 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
828
829 sc->bge_std = i - 1;
830 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
831
832 return (0);
833}
834
835static void
836bge_free_rx_ring_std(struct bge_softc *sc)
837{
838 int i;
839
840 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
841 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
842 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
843 sc->bge_cdata.bge_rx_std_dmamap[i],
844 BUS_DMASYNC_POSTREAD);
845 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
846 sc->bge_cdata.bge_rx_std_dmamap[i]);
847 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
848 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
849 }
850 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
851 sizeof(struct bge_rx_bd));
852 }
853}
854
855static int
856bge_init_rx_ring_jumbo(struct bge_softc *sc)
857{
858 struct bge_rcb *rcb;
859 int i;
860
861 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
862 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
863 return (ENOBUFS);
 864	}
865
866 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
867 sc->bge_cdata.bge_rx_jumbo_ring_map,
868 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
869
870 sc->bge_jumbo = i - 1;
871
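	/*
	 * Flag the jumbo ring as using extended (multi-fragment) RX
	 * buffer descriptors.
	 */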
872 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
873 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
874 BGE_RCB_FLAG_USE_EXT_RX_BD);
875 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
876
877 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
878
879 return (0);
880}
881
882static void
883bge_free_rx_ring_jumbo(struct bge_softc *sc)
884{
885 int i;
886
887 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
888 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
889 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
890 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
891 BUS_DMASYNC_POSTREAD);
892 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
893 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
894 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
895 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
896 }
897 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
898 sizeof(struct bge_extrx_bd));
899 }
900}
901
902static void
903bge_free_tx_ring(struct bge_softc *sc)
904{
905 int i;
906
907 if (sc->bge_ldata.bge_tx_ring == NULL)
908 return;
909
910 for (i = 0; i < BGE_TX_RING_CNT; i++) {
911 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
912 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
913 sc->bge_cdata.bge_tx_dmamap[i],
914 BUS_DMASYNC_POSTWRITE);
915 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
916 sc->bge_cdata.bge_tx_dmamap[i]);
917 m_freem(sc->bge_cdata.bge_tx_chain[i]);
918 sc->bge_cdata.bge_tx_chain[i] = NULL;
919 }
920 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
921 sizeof(struct bge_tx_bd));
922 }
923}
924
925static int
926bge_init_tx_ring(struct bge_softc *sc)
927{
928 sc->bge_txcnt = 0;
929 sc->bge_tx_saved_considx = 0;
930
931 /* Initialize transmit producer index for host-memory send ring. */
932 sc->bge_tx_prodidx = 0;
933 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
934
935 /* 5700 b2 errata */
936 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
937 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
938
939 /* NIC-memory send ring not used; initialize to zero. */
940 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
941 /* 5700 b2 errata */
942 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
943 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
944
945 return (0);
946}
947
948static void
949bge_setpromisc(struct bge_softc *sc)
950{
951 struct ifnet *ifp;
952
953 BGE_LOCK_ASSERT(sc);
954
955 ifp = sc->bge_ifp;
956
957 /*
958 * Enable or disable promiscuous mode as needed.
959 * Do not strip VLAN tag when promiscuous mode is enabled.
960 */
961 if (ifp->if_flags & IFF_PROMISC)
962 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC |
963 BGE_RXMODE_RX_KEEP_VLAN_DIAG);
964 else
965 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC |
966 BGE_RXMODE_RX_KEEP_VLAN_DIAG);
967}
968
969static void
970bge_setmulti(struct bge_softc *sc)
971{
972 struct ifnet *ifp;
973 struct ifmultiaddr *ifma;
974 uint32_t hashes[4] = { 0, 0, 0, 0 };
975 int h, i;
976
977 BGE_LOCK_ASSERT(sc);
978
979 ifp = sc->bge_ifp;
980
981 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
982 for (i = 0; i < 4; i++)
983 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
984 return;
985 }
986
987 /* First, zot all the existing filters. */
988 for (i = 0; i < 4; i++)
989 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
990
991 /* Now program new ones. */
992 IF_ADDR_LOCK(ifp);
993 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
994 if (ifma->ifma_addr->sa_family != AF_LINK)
995 continue;
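		/*
		 * Hash on the low 7 bits of the little-endian CRC of the
		 * address: bits 6-5 select one of the four hash registers,
		 * bits 4-0 select the bit within it.
		 */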
996 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
997 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
998 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
999 }
1000 IF_ADDR_UNLOCK(ifp);
1001
1002 for (i = 0; i < 4; i++)
1003 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1004}
1005
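/*
 * Driver/ASF-firmware handshake: drop a signature in shared memory and,
 * with the newer handshake, hint whether this reset marks a start or an
 * unload before the chip is actually reset.
 */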
1006static void
1007bge_sig_pre_reset(sc, type)
1008 struct bge_softc *sc;
1009 int type;
1010{
1011 /*
1012 * Some chips don't like this so only do this if ASF is enabled
1013 */
1014 if (sc->bge_asf_mode)
1015 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1016
1017 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1018 switch (type) {
1019 case BGE_RESET_START:
1020 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1021 break;
1022 case BGE_RESET_STOP:
1023 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1024 break;
1025 }
1026 }
1027}
1028
1029static void
1030bge_sig_post_reset(sc, type)
1031 struct bge_softc *sc;
1032 int type;
1033{
1034 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1035 switch (type) {
1036 case BGE_RESET_START:
1037 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1038 /* START DONE */
1039 break;
1040 case BGE_RESET_STOP:
1041 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1042 break;
1043 }
1044 }
1045}
1046
1047static void
1048bge_sig_legacy(sc, type)
1049 struct bge_softc *sc;
1050 int type;
1051{
1052 if (sc->bge_asf_mode) {
1053 switch (type) {
1054 case BGE_RESET_START:
1055 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1056 break;
1057 case BGE_RESET_STOP:
1058 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1059 break;
1060 }
1061 }
1062}
1063
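/*
 * Ask the ASF firmware to pause, then briefly poll the CPU event
 * register for an acknowledgement before the chip is reset.
 */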
1064void bge_stop_fw(struct bge_softc *);
1065void
1066bge_stop_fw(sc)
1067 struct bge_softc *sc;
1068{
1069 int i;
1070
1071 if (sc->bge_asf_mode) {
1072 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
 1073		CSR_WRITE_4(sc, BGE_CPU_EVENT,
 1074		    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1075
1076 for (i = 0; i < 100; i++ ) {
1077 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1078 break;
1079 DELAY(10);
1080 }
1081 }
1082}
1083
1084/*
1085 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1086 * self-test results.
1087 */
1088static int
1089bge_chipinit(struct bge_softc *sc)
1090{
1091 uint32_t dma_rw_ctl;
1092 int i;
1093
1094 /* Set endianness before we access any non-PCI registers. */
1095 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1096
1097 /*
1098 * Check the 'ROM failed' bit on the RX CPU to see if
1099 * self-tests passed.
1100 */
1101 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1102 device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
1103 return (ENODEV);
1104 }
1105
1106 /* Clear the MAC control register */
1107 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1108
1109 /*
1110 * Clear the MAC statistics block in the NIC's
1111 * internal memory.
1112 */
1113 for (i = BGE_STATS_BLOCK;
1114 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1115 BGE_MEMWIN_WRITE(sc, i, 0);
1116
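	/* Likewise clear the status block area in NIC memory. */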
1117 for (i = BGE_STATUS_BLOCK;
1118 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1119 BGE_MEMWIN_WRITE(sc, i, 0);
1120
1121 /* Set up the PCI DMA control register. */
1122 if (sc->bge_flags & BGE_FLAG_PCIE) {
1123 /* PCI Express bus */
1124 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1125 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1126 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1127 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1128 /* PCI-X bus */
1129 if (BGE_IS_5714_FAMILY(sc)) {
1130 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
1131 dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
1132 /* XXX magic values, Broadcom-supplied Linux driver */
1133 if (sc->bge_asicrev == BGE_ASICREV_BCM5780)
1134 dma_rw_ctl |= (1 << 20) | (1 << 18) |
1135 BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1136 else
1137 dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
1138
1139 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1140 /*
1141 * The 5704 uses a different encoding of read/write
1142 * watermarks.
1143 */
1144 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1145 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1146 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1147 else
1148 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1149 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1150 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1151 (0x0F);
1152
1153 /*
1154 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1155 * for hardware bugs.
1156 */
1157 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1158 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1159 uint32_t tmp;
1160
1161 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1162 if (tmp == 0x6 || tmp == 0x7)
1163 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1164 }
1165 } else
1166 /* Conventional PCI bus */
1167 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1168 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1169 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1170 (0x0F);
1171
1172 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1173 sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1174 sc->bge_asicrev == BGE_ASICREV_BCM5705)
1175 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1176 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1177
1178 /*
1179 * Set up general mode register.
1180 */
1181 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1182 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1183 BGE_MODECTL_TX_NO_PHDR_CSUM);
1184
1185 /*
1186 * Tell the firmware the driver is running
1187 */
1188 if (sc->bge_asf_mode & ASF_STACKUP)
1189 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1190
1191 /*
1192 * Disable memory write invalidate. Apparently it is not supported
1193 * properly by these devices.
1194 */
1195 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1196
1197#ifdef __brokenalpha__
1198 /*
 1199	 * Must ensure that we do not cross an 8K (bytes) boundary
1200 * for DMA reads. Our highest limit is 1K bytes. This is a
1201 * restriction on some ALPHA platforms with early revision
1202 * 21174 PCI chipsets, such as the AlphaPC 164lx
1203 */
1204 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1205 BGE_PCI_READ_BNDRY_1024BYTES, 4);
1206#endif
1207
 1208	/* Set the timer prescaler (always 66MHz) */
1209 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1210
1211 return (0);
1212}
1213
1214static int
1215bge_blockinit(struct bge_softc *sc)
1216{
1217 struct bge_rcb *rcb;
1218 bus_size_t vrcb;
1219 bge_hostaddr taddr;
1220 int i;
1221
1222 /*
1223 * Initialize the memory window pointer register so that
1224 * we can access the first 32K of internal NIC RAM. This will
1225 * allow us to set up the TX send ring RCBs and the RX return
1226 * ring RCBs, plus other things which live in NIC memory.
1227 */
1228 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1229
1230 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1231
1232 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1233 /* Configure mbuf memory pool */
1234 if (sc->bge_flags & BGE_FLAG_EXTRAM) {
1235 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1236 BGE_EXT_SSRAM);
1237 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1238 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1239 else
1240 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1241 } else {
1242 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1243 BGE_BUFFPOOL_1);
1244 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1245 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1246 else
1247 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1248 }
1249
1250 /* Configure DMA resource pool */
1251 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1252 BGE_DMA_DESCRIPTORS);
1253 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1254 }
1255
1256 /* Configure mbuf pool watermarks */
1257 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1258 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1259 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1260 } else {
1261 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1262 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1263 }
1264 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1265
1266 /* Configure DMA resource watermarks */
1267 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1268 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1269
1270 /* Enable buffer manager */
1271 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1272 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1273 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1274
1275 /* Poll for buffer manager start indication */
1276 for (i = 0; i < BGE_TIMEOUT; i++) {
1277 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1278 break;
1279 DELAY(10);
1280 }
1281
1282 if (i == BGE_TIMEOUT) {
1283 device_printf(sc->bge_dev,
1284 "buffer manager failed to start\n");
1285 return (ENXIO);
1286 }
1287 }
1288
1289 /* Enable flow-through queues */
1290 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1291 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1292
1293 /* Wait until queue initialization is complete */
1294 for (i = 0; i < BGE_TIMEOUT; i++) {
1295 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1296 break;
1297 DELAY(10);
1298 }
1299
1300 if (i == BGE_TIMEOUT) {
1301 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1302 return (ENXIO);
1303 }
1304
1305 /* Initialize the standard RX ring control block */
1306 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1307 rcb->bge_hostaddr.bge_addr_lo =
1308 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1309 rcb->bge_hostaddr.bge_addr_hi =
1310 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1311 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1312 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1313 if (BGE_IS_5705_OR_BEYOND(sc))
1314 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1315 else
1316 rcb->bge_maxlen_flags =
1317 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1318 if (sc->bge_flags & BGE_FLAG_EXTRAM)
1319 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1320 else
1321 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1322 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1323 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1324
1325 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1326 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1327
1328 /*
1329 * Initialize the jumbo RX ring control block
1330 * We set the 'ring disabled' bit in the flags
1331 * field until we're actually ready to start
1332 * using this ring (i.e. once we set the MTU
1333 * high enough to require it).
1334 */
1335 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1336 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1337
1338 rcb->bge_hostaddr.bge_addr_lo =
1339 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1340 rcb->bge_hostaddr.bge_addr_hi =
1341 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1342 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1343 sc->bge_cdata.bge_rx_jumbo_ring_map,
1344 BUS_DMASYNC_PREREAD);
1345 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1346 BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
1347 if (sc->bge_flags & BGE_FLAG_EXTRAM)
1348 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1349 else
1350 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1351 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1352 rcb->bge_hostaddr.bge_addr_hi);
1353 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1354 rcb->bge_hostaddr.bge_addr_lo);
1355
1356 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1357 rcb->bge_maxlen_flags);
1358 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1359
1360 /* Set up dummy disabled mini ring RCB */
1361 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1362 rcb->bge_maxlen_flags =
1363 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1364 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1365 rcb->bge_maxlen_flags);
1366 }
1367
1368 /*
 1369	 * Set the BD ring replenish thresholds. The recommended
1370 * values are 1/8th the number of descriptors allocated to
1371 * each ring.
1372 */
1373 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1374 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1375
1376 /*
1377 * Disable all unused send rings by setting the 'ring disabled'
1378 * bit in the flags field of all the TX send ring control blocks.
1379 * These are located in NIC memory.
1380 */
1381 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1382 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1383 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1384 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1385 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1386 vrcb += sizeof(struct bge_rcb);
1387 }
1388
1389 /* Configure TX RCB 0 (we use only the first ring) */
1390 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1391 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1392 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1393 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1394 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1395 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1396 if (!(BGE_IS_5705_OR_BEYOND(sc)))
1397 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1398 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1399
1400 /* Disable all unused RX return rings */
1401 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1402 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1403 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1404 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1405 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1406 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1407 BGE_RCB_FLAG_RING_DISABLED));
1408 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1409 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1410 (i * (sizeof(uint64_t))), 0);
1411 vrcb += sizeof(struct bge_rcb);
1412 }
1413
1414 /* Initialize RX ring indexes */
1415 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1416 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1417 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1418
1419 /*
1420 * Set up RX return ring 0
1421 * Note that the NIC address for RX return rings is 0x00000000.
1422 * The return rings live entirely within the host, so the
1423 * nicaddr field in the RCB isn't used.
1424 */
1425 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1426 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1427 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1428 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1429 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1430 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1431 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1432
1433 /* Set random backoff seed for TX */
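	/*
	 * The seed is derived from the station address so that NICs on
	 * the same segment are unlikely to choose identical backoff slots.
	 */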
1434 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1435 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1436 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1437 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1438 BGE_TX_BACKOFF_SEED_MASK);
1439
1440 /* Set inter-packet gap */
1441 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1442
1443 /*
1444 * Specify which ring to use for packets that don't match
1445 * any RX rules.
1446 */
1447 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1448
1449 /*
1450 * Configure number of RX lists. One interrupt distribution
1451 * list, sixteen active lists, one bad frames class.
1452 */
1453 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1454
 1455	/* Initialize RX list placement stats mask. */
1456 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1457 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1458
1459 /* Disable host coalescing until we get it set up */
1460 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1461
1462 /* Poll to make sure it's shut down. */
1463 for (i = 0; i < BGE_TIMEOUT; i++) {
1464 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1465 break;
1466 DELAY(10);
1467 }
1468
1469 if (i == BGE_TIMEOUT) {
1470 device_printf(sc->bge_dev,
1471 "host coalescing engine failed to idle\n");
1472 return (ENXIO);
1473 }
1474
1475 /* Set up host coalescing defaults */
1476 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1477 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1478 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1479 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1480 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1481 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1482 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1483 }
1484 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1485 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1486
1487 /* Set up address of statistics block */
1488 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1489 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1490 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1491 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1492 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1493 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1494 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1495 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1496 }
1497
1498 /* Set up address of status block */
1499 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1500 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1501 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1502 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1503 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1504 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1505
1506 /* Turn on host coalescing state machine */
1507 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1508
1509 /* Turn on RX BD completion state machine and enable attentions */
1510 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1511 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1512
1513 /* Turn on RX list placement state machine */
1514 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1515
1516 /* Turn on RX list selector state machine. */
1517 if (!(BGE_IS_5705_OR_BEYOND(sc)))
1518 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1519
1520 /* Turn on DMA, clear stats */
1521 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1522 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1523 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1524 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1525 ((sc->bge_flags & BGE_FLAG_TBI) ?
1526 BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1527
1528 /* Set misc. local control, enable interrupts on attentions */
1529 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1530
1531#ifdef notdef
1532 /* Assert GPIO pins for PHY reset */
1533 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1534 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1535 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1536 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1537#endif
1538
1539 /* Turn on DMA completion state machine */
1540 if (!(BGE_IS_5705_OR_BEYOND(sc)))
1541 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1542
1543 /* Turn on write DMA state machine */
1544 CSR_WRITE_4(sc, BGE_WDMA_MODE,
1545 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1546
1547 /* Turn on read DMA state machine */
1548 CSR_WRITE_4(sc, BGE_RDMA_MODE,
1549 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1550
1551 /* Turn on RX data completion state machine */
1552 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1553
1554 /* Turn on RX BD initiator state machine */
1555 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1556
1557 /* Turn on RX data and RX BD initiator state machine */
1558 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1559
1560 /* Turn on Mbuf cluster free state machine */
1561 if (!(BGE_IS_5705_OR_BEYOND(sc)))
1562 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1563
1564 /* Turn on send BD completion state machine */
1565 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1566
1567 /* Turn on send data completion state machine */
1568 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1569
1570 /* Turn on send data initiator state machine */
1571 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1572
1573 /* Turn on send BD initiator state machine */
1574 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1575
1576 /* Turn on send BD selector state machine */
1577 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1578
1579 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1580 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1581 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1582
1583 /* ack/clear link change events */
1584 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1585 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1586 BGE_MACSTAT_LINK_CHANGED);
1587 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1588
1589 /* Enable PHY auto polling (for MII/GMII only) */
1590 if (sc->bge_flags & BGE_FLAG_TBI) {
1591 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1592 } else {
1593 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1594 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1595 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1596 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1597 BGE_EVTENB_MI_INTERRUPT);
1598 }
1599
1600 /*
1601 * Clear any pending link state attention.
1602 * Otherwise some link state change events may be lost until attention
1603 * is cleared by bge_intr() -> bge_link_upd() sequence.
1604 * It's not necessary on newer BCM chips - perhaps enabling link
1605 * state change attentions implies clearing pending attention.
1606 */
1607 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1608 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1609 BGE_MACSTAT_LINK_CHANGED);
1610
1611 /* Enable link state change attentions. */
1612 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1613
1614 return (0);
1615}
1616
1617const struct bge_revision *
1618bge_lookup_rev(uint32_t chipid)
1619{
1620 const struct bge_revision *br;
1621
1622 for (br = bge_revisions; br->br_name != NULL; br++) {
1623 if (br->br_chipid == chipid)
1624 return (br);
1625 }
1626
1627 for (br = bge_majorrevs; br->br_name != NULL; br++) {
1628 if (br->br_chipid == BGE_ASICREV(chipid))
1629 return (br);
1630 }
1631
1632 return (NULL);
1633}
1634
1635const struct bge_vendor *
1636bge_lookup_vendor(uint16_t vid)
1637{
1638 const struct bge_vendor *v;
1639
1640 for (v = bge_vendors; v->v_name != NULL; v++)
1641 if (v->v_id == vid)
1642 return (v);
1643
1644 panic("%s: unknown vendor %d", __func__, vid);
1645 return (NULL);
1646}
1647
1648/*
1649 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1650 * against our list and return its name if we find a match.
1651 *
1652 * Note that since the Broadcom controller contains VPD support, we
1653 * can get the device name string from the controller itself instead
1654 * of the compiled-in string. This is a little slow, but it guarantees
1655 * we'll always announce the right product name. Unfortunately, this
1656 * is possible only later in bge_attach(), when we have established
1657 * access to EEPROM.
1658 */
1659static int
1660bge_probe(device_t dev)
1661{
1662 struct bge_type *t = bge_devs;
1663 struct bge_softc *sc = device_get_softc(dev);
1664
1665 bzero(sc, sizeof(struct bge_softc));
1666 sc->bge_dev = dev;
1667
1668 while(t->bge_vid != 0) {
1669 if ((pci_get_vendor(dev) == t->bge_vid) &&
1670 (pci_get_device(dev) == t->bge_did)) {
1671 char buf[64];
1672 const struct bge_revision *br;
1673 const struct bge_vendor *v;
1674 uint32_t id;
1675
1676 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1677 BGE_PCIMISCCTL_ASICREV;
1678 br = bge_lookup_rev(id);
1679 id >>= 16;
1680 v = bge_lookup_vendor(t->bge_vid);
1681 if (br == NULL)
1682 snprintf(buf, 64, "%s unknown ASIC (%#04x)",
1683 v->v_name, id);
1684 else
1685 snprintf(buf, 64, "%s %s, ASIC rev. %#04x",
1686 v->v_name, br->br_name, id);
1687 device_set_desc_copy(dev, buf);
1688 if (pci_get_subvendor(dev) == DELL_VENDORID)
1689 sc->bge_flags |= BGE_FLAG_NO3LED;
1690 return (0);
1691 }
1692 t++;
1693 }
1694
1695 return (ENXIO);
1696}
1697
1698static void
1699bge_dma_free(struct bge_softc *sc)
1700{
1701 int i;
1702
1703 /* Destroy DMA maps for RX buffers. */
1704 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1705 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1706 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1707 sc->bge_cdata.bge_rx_std_dmamap[i]);
1708 }
1709
1710 /* Destroy DMA maps for jumbo RX buffers. */
1711 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1712 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1713 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1714 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1715 }
1716
1717 /* Destroy DMA maps for TX buffers. */
1718 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1719 if (sc->bge_cdata.bge_tx_dmamap[i])
1720 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1721 sc->bge_cdata.bge_tx_dmamap[i]);
1722 }
1723
1724 if (sc->bge_cdata.bge_mtag)
1725 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
 1726
1728 /* Destroy standard RX ring. */
1729 if (sc->bge_cdata.bge_rx_std_ring_map)
1730 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1731 sc->bge_cdata.bge_rx_std_ring_map);
1732 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1733 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1734 sc->bge_ldata.bge_rx_std_ring,
1735 sc->bge_cdata.bge_rx_std_ring_map);
1736
1737 if (sc->bge_cdata.bge_rx_std_ring_tag)
1738 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1739
1740 /* Destroy jumbo RX ring. */
1741 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1742 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1743 sc->bge_cdata.bge_rx_jumbo_ring_map);
1744
1745 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1746 sc->bge_ldata.bge_rx_jumbo_ring)
1747 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1748 sc->bge_ldata.bge_rx_jumbo_ring,
1749 sc->bge_cdata.bge_rx_jumbo_ring_map);
1750
1751 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1752 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1753
1754 /* Destroy RX return ring. */
1755 if (sc->bge_cdata.bge_rx_return_ring_map)
1756 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1757 sc->bge_cdata.bge_rx_return_ring_map);
1758
1759 if (sc->bge_cdata.bge_rx_return_ring_map &&
1760 sc->bge_ldata.bge_rx_return_ring)
1761 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1762 sc->bge_ldata.bge_rx_return_ring,
1763 sc->bge_cdata.bge_rx_return_ring_map);
1764
1765 if (sc->bge_cdata.bge_rx_return_ring_tag)
1766 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1767
1768 /* Destroy TX ring. */
1769 if (sc->bge_cdata.bge_tx_ring_map)
1770 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1771 sc->bge_cdata.bge_tx_ring_map);
1772
1773 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1774 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1775 sc->bge_ldata.bge_tx_ring,
1776 sc->bge_cdata.bge_tx_ring_map);
1777
1778 if (sc->bge_cdata.bge_tx_ring_tag)
1779 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1780
1781 /* Destroy status block. */
1782 if (sc->bge_cdata.bge_status_map)
1783 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1784 sc->bge_cdata.bge_status_map);
1785
1786 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1787 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1788 sc->bge_ldata.bge_status_block,
1789 sc->bge_cdata.bge_status_map);
1790
1791 if (sc->bge_cdata.bge_status_tag)
1792 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1793
1794 /* Destroy statistics block. */
1795 if (sc->bge_cdata.bge_stats_map)
1796 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1797 sc->bge_cdata.bge_stats_map);
1798
1799 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1800 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1801 sc->bge_ldata.bge_stats,
1802 sc->bge_cdata.bge_stats_map);
1803
1804 if (sc->bge_cdata.bge_stats_tag)
1805 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1806
1807 /* Destroy the parent tag. */
1808 if (sc->bge_cdata.bge_parent_tag)
1809 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1810}
1811
1812static int
1813bge_dma_alloc(device_t dev)
1814{
1815 struct bge_dmamap_arg ctx;
1816 struct bge_softc *sc;
1817 int i, error;
1818
1819 sc = device_get_softc(dev);
1820
1821 /*
1822 * Allocate the parent bus DMA tag appropriate for PCI.
1823 */
1824 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),/* parent */
1825 1, 0, /* alignment, boundary */
1826 BUS_SPACE_MAXADDR, /* lowaddr */
1827 BUS_SPACE_MAXADDR, /* highaddr */
1828 NULL, NULL, /* filter, filterarg */
1829 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
1830 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1831 0, /* flags */
1832 NULL, NULL, /* lockfunc, lockarg */
1833 &sc->bge_cdata.bge_parent_tag);
1834
1835 if (error != 0) {
1836 device_printf(sc->bge_dev,
1837 "could not allocate parent dma tag\n");
1838 return (ENOMEM);
1839 }
1840
1841 /*
1842 * Create tag for RX mbufs.
1843 */
1844 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1845 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1846 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1847 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1848
1849 if (error) {
1850 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1851 return (ENOMEM);
1852 }
1853
1854 /* Create DMA maps for RX buffers. */
1855 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1856 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1857 &sc->bge_cdata.bge_rx_std_dmamap[i]);
1858 if (error) {
1859 device_printf(sc->bge_dev,
1860 "can't create DMA map for RX\n");
1861 return (ENOMEM);
1862 }
1863 }
1864
1865 /* Create DMA maps for TX buffers. */
1866 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1867 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1868 &sc->bge_cdata.bge_tx_dmamap[i]);
1869 if (error) {
1870 device_printf(sc->bge_dev,
 1871			    "can't create DMA map for TX\n");
1872 return (ENOMEM);
1873 }
1874 }
1875
1876 /* Create tag for standard RX ring. */
1877 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1878 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1879 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1880 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1881
1882 if (error) {
1883 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1884 return (ENOMEM);
1885 }
1886
1887 /* Allocate DMA'able memory for standard RX ring. */
1888 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1889 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1890 &sc->bge_cdata.bge_rx_std_ring_map);
1891 if (error)
1892 return (ENOMEM);
1893
1894 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1895
1896 /* Load the address of the standard RX ring. */
1897 ctx.bge_maxsegs = 1;
1898 ctx.sc = sc;
1899
1900 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1901 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1902 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1903
1904 if (error)
1905 return (ENOMEM);
1906
1907 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1908
1909 /* Create tags for jumbo mbufs. */
1910 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1911 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1912 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1913 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1914 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1915 if (error) {
1916 device_printf(sc->bge_dev,
1917 "could not allocate jumbo dma tag\n");
1918 return (ENOMEM);
1919 }
1920
1921 /* Create tag for jumbo RX ring. */
1922 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1923 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1924 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1925 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1926
1927 if (error) {
1928 device_printf(sc->bge_dev,
1929 "could not allocate jumbo ring dma tag\n");
1930 return (ENOMEM);
1931 }
1932
1933 /* Allocate DMA'able memory for jumbo RX ring. */
1934 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1935 (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1936 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1937 &sc->bge_cdata.bge_rx_jumbo_ring_map);
1938 if (error)
1939 return (ENOMEM);
1940
1941 /* Load the address of the jumbo RX ring. */
1942 ctx.bge_maxsegs = 1;
1943 ctx.sc = sc;
1944
1945 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1946 sc->bge_cdata.bge_rx_jumbo_ring_map,
1947 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1948 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1949
1950 if (error)
1951 return (ENOMEM);
1952
1953 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1954
1955 /* Create DMA maps for jumbo RX buffers. */
1956 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1957 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1958 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1959 if (error) {
1960 device_printf(sc->bge_dev,
1961 "can't create DMA map for jumbo RX\n");
1962 return (ENOMEM);
1963 }
1964 }
1965
1966 }
1967
1968 /* Create tag for RX return ring. */
1969 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1970 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1971 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1972 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1973
1974 if (error) {
1975 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1976 return (ENOMEM);
1977 }
1978
1979 /* Allocate DMA'able memory for RX return ring. */
1980 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1981 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1982 &sc->bge_cdata.bge_rx_return_ring_map);
1983 if (error)
1984 return (ENOMEM);
1985
1986 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1987 BGE_RX_RTN_RING_SZ(sc));
1988
1989 /* Load the address of the RX return ring. */
1990 ctx.bge_maxsegs = 1;
1991 ctx.sc = sc;
1992
1993 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1994 sc->bge_cdata.bge_rx_return_ring_map,
1995 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1996 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1997
1998 if (error)
1999 return (ENOMEM);
2000
2001 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2002
2003 /* Create tag for TX ring. */
2004 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2005 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2006 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2007 &sc->bge_cdata.bge_tx_ring_tag);
2008
2009 if (error) {
2010 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2011 return (ENOMEM);
2012 }
2013
2014 /* Allocate DMA'able memory for TX ring. */
2015 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2016 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2017 &sc->bge_cdata.bge_tx_ring_map);
2018 if (error)
2019 return (ENOMEM);
2020
2021 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2022
2023 /* Load the address of the TX ring. */
2024 ctx.bge_maxsegs = 1;
2025 ctx.sc = sc;
2026
2027 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2028 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2029 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2030
2031 if (error)
2032 return (ENOMEM);
2033
2034 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2035
2036 /* Create tag for status block. */
2037 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2038 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2039 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2040 NULL, NULL, &sc->bge_cdata.bge_status_tag);
2041
2042 if (error) {
2043 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2044 return (ENOMEM);
2045 }
2046
2047 /* Allocate DMA'able memory for status block. */
2048 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2049 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2050 &sc->bge_cdata.bge_status_map);
2051 if (error)
2052 return (ENOMEM);
2053
2054 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2055
2056 /* Load the address of the status block. */
2057 ctx.sc = sc;
2058 ctx.bge_maxsegs = 1;
2059
2060 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2061 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2062 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2063
2064 if (error)
2065 return (ENOMEM);
2066
2067 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2068
2069 /* Create tag for statistics block. */
2070 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2071 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2072 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2073 &sc->bge_cdata.bge_stats_tag);
2074
2075 if (error) {
2076 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2077 return (ENOMEM);
2078 }
2079
2080 /* Allocate DMA'able memory for statistics block. */
2081 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2082 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2083 &sc->bge_cdata.bge_stats_map);
2084 if (error)
2085 return (ENOMEM);
2086
2087 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2088
 2089	/* Load the address of the statistics block. */
2090 ctx.sc = sc;
2091 ctx.bge_maxsegs = 1;
2092
2093 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2094 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2095 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2096
2097 if (error)
2098 return (ENOMEM);
2099
2100 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2101
2102 return (0);
2103}
2104
2105static int
2106bge_attach(device_t dev)
2107{
2108 struct ifnet *ifp;
2109 struct bge_softc *sc;
2110 uint32_t hwcfg = 0;
2111 uint32_t mac_tmp = 0;
2112 u_char eaddr[6];
2113 int error = 0, rid;
2114 int trys;
2115
2116 sc = device_get_softc(dev);
2117 sc->bge_dev = dev;
2118
2119 /*
2120 * Map control/status registers.
2121 */
2122 pci_enable_busmaster(dev);
2123
2124 rid = BGE_PCI_BAR0;
2125 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2126 RF_ACTIVE|PCI_RF_DENSE);
2127
2128 if (sc->bge_res == NULL) {
2129 device_printf (sc->bge_dev, "couldn't map memory\n");
2130 error = ENXIO;
2131 goto fail;
2132 }
2133
2134 sc->bge_btag = rman_get_bustag(sc->bge_res);
2135 sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2136
2137 /* Allocate interrupt. */
2138 rid = 0;
2139
2140 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2141 RF_SHAREABLE | RF_ACTIVE);
2142
2143 if (sc->bge_irq == NULL) {
2144 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2145 error = ENXIO;
2146 goto fail;
2147 }
2148
2149 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2150
2151 /* Save ASIC rev. */
2152
2153 sc->bge_chipid =
2154 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2155 BGE_PCIMISCCTL_ASICREV;
2156 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2157 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2158
2159 /*
 2160	 * XXX: Broadcom Linux driver. Not in specs or errata.
2161 * PCI-Express?
2162 */
2163 if (BGE_IS_5705_OR_BEYOND(sc)) {
2164 uint32_t v;
2165
2166 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2167 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2168 v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2169 if ((v & 0xff) == BGE_PCIE_CAPID)
2170 sc->bge_flags |= BGE_FLAG_PCIE;
2171 }
2172 }
2173
 2174	/*
 2175	 * PCI-X? (The conventional-PCI mode bit in PCISTATE is clear on PCI-X.)
 2176	 */
2177 if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
2178 BGE_PCISTATE_PCI_BUSMODE) == 0)
2179 sc->bge_flags |= BGE_FLAG_PCIX;
2180
2181 /* Try to reset the chip. */
2182 if (bge_reset(sc)) {
2183 device_printf(sc->bge_dev, "chip reset failed\n");
2184 bge_release_resources(sc);
2185 error = ENXIO;
2186 goto fail;
2187 }
2188
2189 sc->bge_asf_mode = 0;
2190 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2191 == BGE_MAGIC_NUMBER)) {
2192 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2193 & BGE_HWCFG_ASF) {
2194 sc->bge_asf_mode |= ASF_ENABLE;
2195 sc->bge_asf_mode |= ASF_STACKUP;
2196 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2197 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2198 }
2199 }
2200 }
2201
2202 /* Try to reset the chip again the nice way. */
2203 bge_stop_fw(sc);
2204 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2205 if (bge_reset(sc)) {
2206 device_printf(sc->bge_dev, "chip reset failed\n");
2207 bge_release_resources(sc);
2208 error = ENXIO;
2209 goto fail;
2210 }
2211
2212 bge_sig_legacy(sc, BGE_RESET_STOP);
2213 bge_sig_post_reset(sc, BGE_RESET_STOP);
2214
2215 if (bge_chipinit(sc)) {
2216 device_printf(sc->bge_dev, "chip initialization failed\n");
2217 bge_release_resources(sc);
2218 error = ENXIO;
2219 goto fail;
2220 }
2221
2222 /*
 2223	 * Get the station address from NIC memory or, failing that, the EEPROM.
2224 */
2225 mac_tmp = bge_readmem_ind(sc, 0x0c14);
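	/* 0x484b is ASCII "HK"; if present, the bootcode has stored the address here. */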
2226 if ((mac_tmp >> 16) == 0x484b) {
2227 eaddr[0] = (u_char)(mac_tmp >> 8);
2228 eaddr[1] = (u_char)mac_tmp;
2229 mac_tmp = bge_readmem_ind(sc, 0x0c18);
2230 eaddr[2] = (u_char)(mac_tmp >> 24);
2231 eaddr[3] = (u_char)(mac_tmp >> 16);
2232 eaddr[4] = (u_char)(mac_tmp >> 8);
2233 eaddr[5] = (u_char)mac_tmp;
2234 } else if (bge_read_eeprom(sc, eaddr,
2235 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2236 device_printf(sc->bge_dev, "failed to read station address\n");
2237 bge_release_resources(sc);
2238 error = ENXIO;
2239 goto fail;
2240 }
2241
2242 /* 5705 limits RX return ring to 512 entries. */
2243 if (BGE_IS_5705_OR_BEYOND(sc))
2244 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2245 else
2246 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2247
2248 if (bge_dma_alloc(dev)) {
2249 device_printf(sc->bge_dev,
2250 "failed to allocate DMA resources\n");
2251 bge_release_resources(sc);
2252 error = ENXIO;
2253 goto fail;
2254 }
2255
2256 /* Set default tuneable values. */
2257 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2258 sc->bge_rx_coal_ticks = 150;
2259 sc->bge_tx_coal_ticks = 150;
2260 sc->bge_rx_max_coal_bds = 64;
2261 sc->bge_tx_max_coal_bds = 128;
2262
2263 /* Set up ifnet structure */
2264 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2265 if (ifp == NULL) {
2266 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2267 bge_release_resources(sc);
2268 error = ENXIO;
2269 goto fail;
2270 }
2271 ifp->if_softc = sc;
2272 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2273 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2274 ifp->if_ioctl = bge_ioctl;
2275 ifp->if_start = bge_start;
2276 ifp->if_init = bge_init;
2277 ifp->if_mtu = ETHERMTU;
2278 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2279 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2280 IFQ_SET_READY(&ifp->if_snd);
2281 ifp->if_hwassist = BGE_CSUM_FEATURES;
2282 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2283 IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
2284 ifp->if_capenable = ifp->if_capabilities;
2285#ifdef DEVICE_POLLING
2286 ifp->if_capabilities |= IFCAP_POLLING;
2287#endif
2288
2289 /*
2290 * 5700 B0 chips do not support checksumming correctly due
2291 * to hardware bugs.
2292 */
2293 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2294 ifp->if_capabilities &= ~IFCAP_HWCSUM;
 2295		ifp->if_capenable &= ~IFCAP_HWCSUM;
2296 ifp->if_hwassist = 0;
2297 }
2298
2299 /*
2300 * Figure out what sort of media we have by checking the
2301 * hardware config word in the first 32k of NIC internal memory,
2302 * or fall back to examining the EEPROM if necessary.
2303 * Note: on some BCM5700 cards, this value appears to be unset.
2304 * If that's the case, we have to rely on identifying the NIC
2305 * by its PCI subsystem ID, as we do below for the SysKonnect
2306 * SK-9D41.
2307 */
2308 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2309 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2310 else {
2311 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2312 sizeof(hwcfg))) {
2313 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2314 bge_release_resources(sc);
2315 error = ENXIO;
2316 goto fail;
2317 }
2318 hwcfg = ntohl(hwcfg);
2319 }
2320
2321 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2322 sc->bge_flags |= BGE_FLAG_TBI;
2323
2324 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2325 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2326 sc->bge_flags |= BGE_FLAG_TBI;
2327
2328 if (sc->bge_flags & BGE_FLAG_TBI) {
2329 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2330 bge_ifmedia_upd, bge_ifmedia_sts);
2331 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2332 ifmedia_add(&sc->bge_ifmedia,
2333 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2334 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2335 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2336 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2337 } else {
2338 /*
2339 * Do transceiver setup and tell the firmware the
2340 * driver is down so we can get access to the PHY
2341 * during the probe even if ASF is running. Retry a
2342 * couple of times if we get a conflict with the ASF
2343 * firmware accessing the PHY.
2344 */
2345 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2346 trys = 0;
2347 again:
2348 bge_asf_driver_up(sc);
2349 
2350 if (mii_phy_probe(dev, &sc->bge_miibus,
2351 bge_ifmedia_upd, bge_ifmedia_sts)) {
2352 if (trys++ < 4) {
2353 device_printf(sc->bge_dev, "Try again\n");
2354 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR, BMCR_RESET);
2355 goto again;
2356 }
2357
2358 device_printf(sc->bge_dev, "MII without any PHY!\n");
2359 bge_release_resources(sc);
2360 error = ENXIO;
2361 goto fail;
2362 }
2363
2364 /*
2365 * Now tell the firmware we are going up after probing the PHY
2366 */
2367 if (sc->bge_asf_mode & ASF_STACKUP)
2368 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2369 }
2370
2371 /*
2372 * When using the BCM5701 in PCI-X mode, data corruption has
2373 * been observed in the first few bytes of some received packets.
2374 * Aligning the packet buffer in memory eliminates the corruption.
2375 * Unfortunately, this misaligns the packet payloads. On platforms
2376 * which do not support unaligned accesses, we will realign the
2377 * payloads by copying the received packets.
2378 */
2379 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2380 sc->bge_flags & BGE_FLAG_PCIX)
2381 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2382
2383 /*
2384 * Call MI attach routine.
2385 */
2386 ether_ifattach(ifp, eaddr);
2387 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2388
2389 /*
2390 * Hookup IRQ last.
2391 */
2392 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2393 bge_intr, sc, &sc->bge_intrhand);
2394
2395 if (error) {
2396 bge_detach(dev);
2397 device_printf(sc->bge_dev, "couldn't set up irq\n");
2398 }
2399
2400fail:
2401 return (error);
2402}
2403
2404static int
2405bge_detach(device_t dev)
2406{
2407 struct bge_softc *sc;
2408 struct ifnet *ifp;
2409
2410 sc = device_get_softc(dev);
2411 ifp = sc->bge_ifp;
2412
2413#ifdef DEVICE_POLLING
2414 if (ifp->if_capenable & IFCAP_POLLING)
2415 ether_poll_deregister(ifp);
2416#endif
2417
2418 BGE_LOCK(sc);
2419 bge_stop(sc);
2420 bge_reset(sc);
2421 BGE_UNLOCK(sc);
2422
2423 ether_ifdetach(ifp);
2424
2425 if (sc->bge_flags & BGE_FLAG_TBI) {
2426 ifmedia_removeall(&sc->bge_ifmedia);
2427 } else {
2428 bus_generic_detach(dev);
2429 device_delete_child(dev, sc->bge_miibus);
2430 }
2431
2432 bge_release_resources(sc);
2433
2434 return (0);
2435}
2436
2437static void
2438bge_release_resources(struct bge_softc *sc)
2439{
2440 device_t dev;
2441
2442 dev = sc->bge_dev;
2443
2444 if (sc->bge_vpd_prodname != NULL)
2445 free(sc->bge_vpd_prodname, M_DEVBUF);
2446
2447 if (sc->bge_vpd_readonly != NULL)
2448 free(sc->bge_vpd_readonly, M_DEVBUF);
2449
2450 if (sc->bge_intrhand != NULL)
2451 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2452
2453 if (sc->bge_irq != NULL)
2454 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2455
2456 if (sc->bge_res != NULL)
2457 bus_release_resource(dev, SYS_RES_MEMORY,
2458 BGE_PCI_BAR0, sc->bge_res);
2459
2460 if (sc->bge_ifp != NULL)
2461 if_free(sc->bge_ifp);
2462
2463 bge_dma_free(sc);
2464
2465 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
2466 BGE_LOCK_DESTROY(sc);
2467}
2468
2469static int
2470bge_reset(struct bge_softc *sc)
2471{
2472 device_t dev;
2473 uint32_t cachesize, command, pcistate, reset;
2474 int i, val = 0;
2475
2476 dev = sc->bge_dev;
2477
2478 /* Save some important PCI state. */
2479 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2480 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2481 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2482
2483 pci_write_config(dev, BGE_PCI_MISC_CTL,
2484 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2485 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2486
2487 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2488
2489 /* XXX: Broadcom Linux driver. */
2490 if (sc->bge_flags & BGE_FLAG_PCIE) {
2491 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */
2492 CSR_WRITE_4(sc, 0x7e2c, 0x20);
2493 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2494 /* Prevent PCIE link training during global reset */
2495 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2496 reset |= (1<<29);
2497 }
2498 }
2499
2500 /*
2501 * Write the magic number to the firmware mailbox at 0xb50
2502 * so that the driver can synchronize with the firmware.
2503 */
2504 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2505
2506 /* Issue global reset */
2507 bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2508
2509 DELAY(1000);
2510
2511 /* XXX: Broadcom Linux driver. */
2512 if (sc->bge_flags & BGE_FLAG_PCIE) {
2513 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2514 uint32_t v;
2515
2516 DELAY(500000); /* wait for link training to complete */
2517 v = pci_read_config(dev, 0xc4, 4);
2518 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2519 }
2520 /* Set PCIE max payload size and clear error status. */
2521 pci_write_config(dev, 0xd8, 0xf5000, 4);
2522 }
2523
2524 /* Reset some of the PCI state that got zapped by reset. */
2525 pci_write_config(dev, BGE_PCI_MISC_CTL,
2526 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2527 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2528 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2529 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2530 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2531
2532 /* Enable memory arbiter. */
2533 if (BGE_IS_5714_FAMILY(sc)) {
2534 uint32_t val;
2535
2536 val = CSR_READ_4(sc, BGE_MARB_MODE);
2537 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2538 } else
2539 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2540
2541 /*
2542 * Poll the value location we just wrote until
2543 * we see the 1's complement of the magic number.
2544 * This indicates that the firmware initialization
2545 * is complete.
2546 */
2547 for (i = 0; i < BGE_TIMEOUT; i++) {
2548 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2549 if (val == ~BGE_MAGIC_NUMBER)
2550 break;
2551 DELAY(10);
2552 }
2553
2554 if (i == BGE_TIMEOUT) {
2555 device_printf(sc->bge_dev, "firmware handshake timed out\n");
2556 return(0);
2557 }
2558
2559 /*
2560 * XXX Wait for the value of the PCISTATE register to
2561 * return to its original pre-reset state. This is a
2562 * fairly good indicator of reset completion. If we don't
2563 * wait for the reset to fully complete, trying to read
2564 * from the device's non-PCI registers may yield garbage
2565 * results.
2566 */
2567 for (i = 0; i < BGE_TIMEOUT; i++) {
2568 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2569 break;
2570 DELAY(10);
2571 }
2572
2573 /* Fix up byte swapping. */
2574 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2575 BGE_MODECTL_BYTESWAP_DATA);
2576
2577 /* Tell the ASF firmware we are up */
2578 if (sc->bge_asf_mode & ASF_STACKUP)
2579 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2580
2581 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2582
2583 /*
2584 * The 5704 in TBI mode apparently needs some special
2585 * adjustment to ensure the SERDES drive level is set
2586 * to 1.2V.
2587 */
2588 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
2589 sc->bge_flags & BGE_FLAG_TBI) {
2590 uint32_t serdescfg;
2591
2592 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2593 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2594 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2595 }
2596
2597 /* XXX: Broadcom Linux driver. */
2598 if (sc->bge_flags & BGE_FLAG_PCIE &&
2599 sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2600 uint32_t v;
2601
2602 v = CSR_READ_4(sc, 0x7c00);
2603 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2604 }
2605 DELAY(10000);
2606
2607 return(0);
2608}
2609
2610/*
2611 * Frame reception handling. This is called if there's a frame
2612 * on the receive return list.
2613 *
2614 * Note: we have to be able to handle two possibilities here:
2615 * 1) the frame is from the jumbo receive ring
2616 * 2) the frame is from the standard receive ring
2617 */
2618
2619static void
2620bge_rxeof(struct bge_softc *sc)
2621{
2622 struct ifnet *ifp;
2623 int stdcnt = 0, jumbocnt = 0;
2624
2625 BGE_LOCK_ASSERT(sc);
2626
2627 /* Nothing to do. */
2628 if (sc->bge_rx_saved_considx ==
2629 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2630 return;
2631
2632 ifp = sc->bge_ifp;
2633
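/* Sync the return ring and buffer rings so the CPU sees the NIC's latest descriptor updates. */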
2634 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2635 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2636 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2637 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2638 if (BGE_IS_JUMBO_CAPABLE(sc))
2639 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2640 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);
2641
2642 while(sc->bge_rx_saved_considx !=
2643 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2644 struct bge_rx_bd *cur_rx;
2645 uint32_t rxidx;
2646 struct mbuf *m = NULL;
2647 uint16_t vlan_tag = 0;
2648 int have_tag = 0;
2649
2650#ifdef DEVICE_POLLING
2651 if (ifp->if_capenable & IFCAP_POLLING) {
2652 if (sc->rxcycles <= 0)
2653 break;
2654 sc->rxcycles--;
2655 }
2656#endif
2657
2658 cur_rx =
2659 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2660
2661 rxidx = cur_rx->bge_idx;
2662 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2663
2664 if (!(ifp->if_flags & IFF_PROMISC) &&
2665 (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG)) {
2666 have_tag = 1;
2667 vlan_tag = cur_rx->bge_vlan_tag;
2668 }
2669
2670 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2671 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2672 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2673 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2674 BUS_DMASYNC_POSTREAD);
2675 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2676 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2677 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2678 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2679 jumbocnt++;
2680 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2681 ifp->if_ierrors++;
2682 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2683 continue;
2684 }
2685 if (bge_newbuf_jumbo(sc,
2686 sc->bge_jumbo, NULL) == ENOBUFS) {
2687 ifp->if_ierrors++;
2688 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2689 continue;
2690 }
2691 } else {
2692 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2693 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2694 sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2695 BUS_DMASYNC_POSTREAD);
2696 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2697 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2698 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2699 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2700 stdcnt++;
2701 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2702 ifp->if_ierrors++;
2703 bge_newbuf_std(sc, sc->bge_std, m);
2704 continue;
2705 }
2706 if (bge_newbuf_std(sc, sc->bge_std,
2707 NULL) == ENOBUFS) {
2708 ifp->if_ierrors++;
2709 bge_newbuf_std(sc, sc->bge_std, m);
2710 continue;
2711 }
2712 }
2713
2714 ifp->if_ipackets++;
2715#ifndef __NO_STRICT_ALIGNMENT
2716 /*
2717 * For architectures with strict alignment we must make sure
2718 * the payload is aligned.
2719 */
2720 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
2721 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2722 cur_rx->bge_len);
2723 m->m_data += ETHER_ALIGN;
2724 }
2725#endif
2726 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2727 m->m_pkthdr.rcvif = ifp;
2728
2729 if (ifp->if_capenable & IFCAP_RXCSUM) {
2730 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2731 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2732 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2733 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2734 }
2735 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2736 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2737 m->m_pkthdr.csum_data =
2738 cur_rx->bge_tcp_udp_csum;
2739 m->m_pkthdr.csum_flags |=
2740 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2741 }
2742 }
2743
2744 /*
2745 * If we received a packet with a vlan tag,
2746 * attach that information to the packet.
2747 */
2748 if (have_tag) {
2749 m->m_pkthdr.ether_vtag = vlan_tag;
2750 m->m_flags |= M_VLANTAG;
2751 }
2752
2753 BGE_UNLOCK(sc);
2754 (*ifp->if_input)(ifp, m);
2755 BGE_LOCK(sc);
2756 }
2757
2758 if (stdcnt > 0)
2759 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2760 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2761
2762 if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
2763 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2764 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
2765
2766 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2767 if (stdcnt)
2768 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2769 if (jumbocnt)
2770 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2771}
2772
2773static void
2774bge_txeof(struct bge_softc *sc)
2775{
2776 struct bge_tx_bd *cur_tx = NULL;
2777 struct ifnet *ifp;
2778
2779 BGE_LOCK_ASSERT(sc);
2780
2781 /* Nothing to do. */
2782 if (sc->bge_tx_saved_considx ==
2783 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2784 return;
2785
2786 ifp = sc->bge_ifp;
2787
2788 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2789 sc->bge_cdata.bge_tx_ring_map,
2790 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2791 /*
2792 * Go through our tx ring and free mbufs for those
2793 * frames that have been sent.
2794 */
2795 while (sc->bge_tx_saved_considx !=
2796 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2797 uint32_t idx = 0;
2798
2799 idx = sc->bge_tx_saved_considx;
2800 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2801 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2802 ifp->if_opackets++;
2803 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2804 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2805 sc->bge_cdata.bge_tx_dmamap[idx],
2806 BUS_DMASYNC_POSTWRITE);
2807 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2808 sc->bge_cdata.bge_tx_dmamap[idx]);
2809 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2810 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2811 }
2812 sc->bge_txcnt--;
2813 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2814 sc->bge_timer = 0;
2815 }
2816
2817 if (cur_tx != NULL)
2818 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2819}
2820
2821#ifdef DEVICE_POLLING
2822static void
2823bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2824{
2825 struct bge_softc *sc = ifp->if_softc;
2826 uint32_t statusword;
2827
2828 BGE_LOCK(sc);
2829 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2830 BGE_UNLOCK(sc);
2831 return;
2832 }
2833
2834 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2835 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2836
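/* Read and clear the status word the NIC DMAs into the status block. */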
2837 statusword = atomic_readandclear_32(
2838 &sc->bge_ldata.bge_status_block->bge_status);
2839
2840 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2841 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2842
2843 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS cmd */
2844 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2845 sc->bge_link_evt++;
2846
2847 if (cmd == POLL_AND_CHECK_STATUS)
2848 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2849 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
2850 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
2851 bge_link_upd(sc);
2852
2853 sc->rxcycles = count;
2854 bge_rxeof(sc);
2855 bge_txeof(sc);
2856 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2857 bge_start_locked(ifp);
2858
2859 BGE_UNLOCK(sc);
2860}
2861#endif /* DEVICE_POLLING */
2862
2863static void
2864bge_intr(void *xsc)
2865{
2866 struct bge_softc *sc;
2867 struct ifnet *ifp;
2868 uint32_t statusword;
2869
2870 sc = xsc;
2871
2872 BGE_LOCK(sc);
2873
2874 ifp = sc->bge_ifp;
2875
2876#ifdef DEVICE_POLLING
2877 if (ifp->if_capenable & IFCAP_POLLING) {
2878 BGE_UNLOCK(sc);
2879 return;
2880 }
2881#endif
2882
2883 /*
2884 * Do the mandatory PCI flush as well as get the link status.
2885 */
2886 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
2887
2888 /* Ack interrupt and stop others from occurring. */
2889 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2890
2891 /* Make sure the descriptor ring indexes are coherent. */
2892 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2893 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2894 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2895 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2896
2897 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2898 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
2899 statusword || sc->bge_link_evt)
2900 bge_link_upd(sc);
2901
2902 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2903 /* Check RX return ring producer/consumer. */
2904 bge_rxeof(sc);
2905
2906 /* Check TX ring producer/consumer. */
2907 bge_txeof(sc);
2908 }
2909
2910 /* Re-enable interrupts. */
2911 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2912
2913 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2914 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2915 bge_start_locked(ifp);
2916
2917 BGE_UNLOCK(sc);
2918}
2919
2920static void
2921bge_asf_driver_up(struct bge_softc *sc)
2922{
2923 if (sc->bge_asf_mode & ASF_STACKUP) {
2924 /* Send ASF heartbeat approx. every 2s */
2925 if (sc->bge_asf_count)
2926 sc->bge_asf_count --;
2927 else {
2928 sc->bge_asf_count = 5;
2929 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
2930 BGE_FW_DRV_ALIVE);
2931 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
2932 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
2933 CSR_WRITE_4(sc, BGE_CPU_EVENT,
2934 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
2935 }
2936 }
2937}
2938
2939static void
2940bge_tick(void *xsc)
2941{
2942 struct bge_softc *sc = xsc;
2943 struct mii_data *mii = NULL;
2944
2945 BGE_LOCK_ASSERT(sc);
2946
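/* 5705 and newer parts lack the on-host statistics block; read the MAC statistics registers instead. */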
2947 if (BGE_IS_5705_OR_BEYOND(sc))
2948 bge_stats_update_regs(sc);
2949 else
2950 bge_stats_update(sc);
2951
2952 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
2953 mii = device_get_softc(sc->bge_miibus);
2954 /* Don't mess with the PHY in IPMI/ASF mode */
2955 if (!((sc->bge_asf_mode & ASF_STACKUP) && (sc->bge_link)))
2956 mii_tick(mii);
2957 } else {
2958 /*
2959 * Since auto-polling can't be used in TBI mode, we have to poll
2960 * link status manually. Here we register a pending link event
2961 * and trigger an interrupt.
2962 */
2963#ifdef DEVICE_POLLING
2964 /* In polling mode we poll link state in bge_poll(). */
2965 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
2966#endif
2967 {
2968 sc->bge_link_evt++;
2969 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2970 }
2971 }
2972
2973 bge_asf_driver_up(sc);
2974 bge_watchdog(sc);
2975
2976 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
2977}
2978
if_bge.c (164769):

2979 static void
2980 bge_stats_update_regs(struct bge_softc *sc)
2981 {
2982 struct bge_mac_stats_regs stats;
2983 struct ifnet *ifp;
2984 uint32_t *s;
2985 u_long cnt; /* current register value */
2986 int i;
2987 
2988 ifp = sc->bge_ifp;
2989 
2990 s = (uint32_t *)&stats;
2991 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2992 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2993 s++;
2994 }
2995 
2996 cnt = stats.dot3StatsSingleCollisionFrames +
2997 stats.dot3StatsMultipleCollisionFrames +
2998 stats.dot3StatsExcessiveCollisions +
2999 stats.dot3StatsLateCollisions;
3000 ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
3001 cnt - sc->bge_tx_collisions : cnt;
3002 sc->bge_tx_collisions = cnt;
3003 }
3004 
3005 static void
3006 bge_stats_update(struct bge_softc *sc)
3007 {
3008 struct ifnet *ifp;
3009 bus_size_t stats;
3010 u_long cnt; /* current register value */
3011 
3012 ifp = sc->bge_ifp;
3013 
3014 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3015 
3016 #define READ_STAT(sc, stats, stat) \
3017 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3018 
3019 cnt = READ_STAT(sc, stats,
3020 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo);
3021 cnt += READ_STAT(sc, stats,
3022 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo);
3023 cnt += READ_STAT(sc, stats,
3024 txstats.dot3StatsExcessiveCollisions.bge_addr_lo);
3025 cnt += READ_STAT(sc, stats,
3026 txstats.dot3StatsLateCollisions.bge_addr_lo);
3027 ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
3028 cnt - sc->bge_tx_collisions : cnt;
3029 sc->bge_tx_collisions = cnt;
3030 
3031 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3032 ifp->if_ierrors += cnt >= sc->bge_rx_discards ?
3033 cnt - sc->bge_rx_discards : cnt;
3034 sc->bge_rx_discards = cnt;
3035 
3036 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3037 ifp->if_oerrors += cnt >= sc->bge_tx_discards ?
3038 cnt - sc->bge_tx_discards : cnt;
3039 sc->bge_tx_discards = cnt;
3040 
3041 #undef READ_STAT
3042 }
3043 

if_bge.c (164780):

2979 static void
2980 bge_stats_update_regs(struct bge_softc *sc)
2981 {
2982 struct ifnet *ifp;
2983 uint32_t cnt; /* current register value */
2984 
2985 ifp = sc->bge_ifp;
2986 
2987 cnt = CSR_READ_4(sc, BGE_MAC_STATS +
2988 offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
2989 ifp->if_collisions += (u_long)(cnt - sc->bge_tx_collisions);
2990 sc->bge_tx_collisions = cnt;
2991 
2992 cnt = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
2993 ifp->if_ierrors += (u_long)(cnt - sc->bge_rx_discards);
2994 sc->bge_rx_discards = cnt;
2995 }
2996 
2997 static void
2998 bge_stats_update(struct bge_softc *sc)
2999 {
3000 struct ifnet *ifp;
3001 bus_size_t stats;
3002 uint32_t cnt; /* current register value */
3003 
3004 ifp = sc->bge_ifp;
3005 
3006 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3007 
3008 #define READ_STAT(sc, stats, stat) \
3009 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3010 
3011 cnt = READ_STAT(sc, stats,
3012 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo);
3013 cnt += READ_STAT(sc, stats,
3014 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo);
3015 cnt += READ_STAT(sc, stats,
3016 txstats.dot3StatsExcessiveCollisions.bge_addr_lo);
3017 cnt += READ_STAT(sc, stats,
3018 txstats.dot3StatsLateCollisions.bge_addr_lo);
3019 ifp->if_collisions += (u_long)(cnt - sc->bge_tx_collisions);
3020 sc->bge_tx_collisions = cnt;
3021 
3022 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3023 ifp->if_ierrors += (u_long)(cnt - sc->bge_rx_discards);
3024 sc->bge_rx_discards = cnt;
3025 
3026 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3027 ifp->if_oerrors += (u_long)(cnt - sc->bge_tx_discards);
3044/*
3045 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3046 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3047 * but when such padded frames employ the bge IP/TCP checksum offload,
3048 * the hardware checksum assist gives incorrect results (possibly
3049 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3050 * If we pad such runts with zeros, the onboard checksum comes out correct.
3051 */
3052static __inline int
3053bge_cksum_pad(struct mbuf *m)
3054{
3055 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3056 struct mbuf *last;
3057
3058 /* If there's only the packet-header and we can pad there, use it. */
3059 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
3060 M_TRAILINGSPACE(m) >= padlen) {
3061 last = m;
3062 } else {
3063 /*
3064 * Walk packet chain to find last mbuf. We will either
3065 * pad there, or append a new mbuf and pad it.
3066 */
3067 for (last = m; last->m_next != NULL; last = last->m_next);
3068 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
3069 /* Allocate new empty mbuf, pad it. Compact later. */
3070 struct mbuf *n;
3071
3072 MGET(n, M_DONTWAIT, MT_DATA);
3073 if (n == NULL)
3074 return (ENOBUFS);
3075 n->m_len = 0;
3076 last->m_next = n;
3077 last = n;
3078 }
3079 }
3080
3081 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
3082 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3083 last->m_len += padlen;
3084 m->m_pkthdr.len += padlen;
3085
3086 return (0);
3087}
3088
3089/*
3090 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3091 * pointers to descriptors.
3092 */
3093static int
3094bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
3095{
3096 bus_dma_segment_t segs[BGE_NSEG_NEW];
3097 bus_dmamap_t map;
3098 struct bge_tx_bd *d;
3099 struct mbuf *m = *m_head;
3100 uint32_t idx = *txidx;
3101 uint16_t csum_flags;
3102 int nsegs, i, error;
3103
3104 csum_flags = 0;
3105 if (m->m_pkthdr.csum_flags) {
3106 if (m->m_pkthdr.csum_flags & CSUM_IP)
3107 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3108 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
3109 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3110 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
3111 (error = bge_cksum_pad(m)) != 0) {
3112 m_freem(m);
3113 *m_head = NULL;
3114 return (error);
3115 }
3116 }
3117 if (m->m_flags & M_LASTFRAG)
3118 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3119 else if (m->m_flags & M_FRAG)
3120 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3121 }
3122
3123 map = sc->bge_cdata.bge_tx_dmamap[idx];
3124 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs,
3125 &nsegs, BUS_DMA_NOWAIT);
3126 if (error == EFBIG) {
3127 m = m_defrag(m, M_DONTWAIT);
3128 if (m == NULL) {
3129 m_freem(*m_head);
3130 *m_head = NULL;
3131 return (ENOBUFS);
3132 }
3133 *m_head = m;
3134 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m,
3135 segs, &nsegs, BUS_DMA_NOWAIT);
3136 if (error) {
3137 m_freem(m);
3138 *m_head = NULL;
3139 return (error);
3140 }
3141 } else if (error != 0)
3142 return (error);
3143
3144 /*
3145 * Sanity check: avoid coming within 16 descriptors
3146 * of the end of the ring.
3147 */
3148 if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3149 bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
3150 return (ENOBUFS);
3151 }
3152
3153 bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
3154
3155 for (i = 0; ; i++) {
3156 d = &sc->bge_ldata.bge_tx_ring[idx];
3157 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3158 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3159 d->bge_len = segs[i].ds_len;
3160 d->bge_flags = csum_flags;
3161 if (i == nsegs - 1)
3162 break;
3163 BGE_INC(idx, BGE_TX_RING_CNT);
3164 }
3165
3166 /* Mark the last segment as end of packet... */
3167 d->bge_flags |= BGE_TXBDFLAG_END;
3168
3169 /* ... and put VLAN tag into first segment. */
3170 d = &sc->bge_ldata.bge_tx_ring[*txidx];
3171 if (m->m_flags & M_VLANTAG) {
3172 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3173 d->bge_vlan_tag = m->m_pkthdr.ether_vtag;
3174 } else
3175 d->bge_vlan_tag = 0;
3176
3177 /*
3178 * Insure that the map for this transmission
3179 * is placed at the array index of the last descriptor
3180 * in this chain.
3181 */
3182 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3183 sc->bge_cdata.bge_tx_dmamap[idx] = map;
3184 sc->bge_cdata.bge_tx_chain[idx] = m;
3185 sc->bge_txcnt += nsegs;
3186
3187 BGE_INC(idx, BGE_TX_RING_CNT);
3188 *txidx = idx;
3189
3190 return (0);
3191}
3192
3193/*
3194 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3195 * to the mbuf data regions directly in the transmit descriptors.
3196 */
3197static void
3198bge_start_locked(struct ifnet *ifp)
3199{
3200 struct bge_softc *sc;
3201 struct mbuf *m_head = NULL;
3202 uint32_t prodidx;
3203 int count = 0;
3204
3205 sc = ifp->if_softc;
3206
3207 if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3208 return;
3209
3210 prodidx = sc->bge_tx_prodidx;
3211
3212 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3213 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3214 if (m_head == NULL)
3215 break;
3216
3217 /*
3218 * XXX
3219 * The code inside the if() block is never reached since we
3220 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3221 * requests to checksum TCP/UDP in a fragmented packet.
3222 *
3223 * XXX
3224 * safety overkill. If this is a fragmented packet chain
3225 * with delayed TCP/UDP checksums, then only encapsulate
3226 * it if we have enough descriptors to handle the entire
3227 * chain at once.
3228 * (paranoia -- may not actually be needed)
3229 */
3230 if (m_head->m_flags & M_FIRSTFRAG &&
3231 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3232 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3233 m_head->m_pkthdr.csum_data + 16) {
3234 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3235 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3236 break;
3237 }
3238 }
3239
3240 /*
3241 * Pack the data into the transmit ring. If we
3242 * don't have room, set the OACTIVE flag and wait
3243 * for the NIC to drain the ring.
3244 */
3245 if (bge_encap(sc, &m_head, &prodidx)) {
3246 if (m_head == NULL)
3247 break;
3248 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3249 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3250 break;
3251 }
3252 ++count;
3253
3254 /*
3255 * If there's a BPF listener, bounce a copy of this frame
3256 * to him.
3257 */
3258 BPF_MTAP(ifp, m_head);
3259 }
3260
3261 if (count == 0)
3262 /* No packets were dequeued. */
3263 return;
3264
3265 /* Transmit. */
3266 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3267 /* 5700 b2 errata */
3268 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3269 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3270
3271 sc->bge_tx_prodidx = prodidx;
3272
3273 /*
3274 * Set a timeout in case the chip goes out to lunch.
3275 */
3276 sc->bge_timer = 5;
3277}
3278
3279/*
3280 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3281 * to the mbuf data regions directly in the transmit descriptors.
3282 */
3283static void
3284bge_start(struct ifnet *ifp)
3285{
3286 struct bge_softc *sc;
3287
3288 sc = ifp->if_softc;
3289 BGE_LOCK(sc);
3290 bge_start_locked(ifp);
3291 BGE_UNLOCK(sc);
3292}
3293
3294static void
3295bge_init_locked(struct bge_softc *sc)
3296{
3297 struct ifnet *ifp;
3298 uint16_t *m;
3299
3300 BGE_LOCK_ASSERT(sc);
3301
3302 ifp = sc->bge_ifp;
3303
3304 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3305 return;
3306
3307 /* Cancel pending I/O and flush buffers. */
3308 bge_stop(sc);
3309
3310 bge_stop_fw(sc);
3311 bge_sig_pre_reset(sc, BGE_RESET_START);
3312 bge_reset(sc);
3313 bge_sig_legacy(sc, BGE_RESET_START);
3314 bge_sig_post_reset(sc, BGE_RESET_START);
3315
3316 bge_chipinit(sc);
3317
3318 /*
3319 * Init the various state machines, ring
3320 * control blocks and firmware.
3321 */
3322 if (bge_blockinit(sc)) {
3323 device_printf(sc->bge_dev, "initialization failure\n");
3324 return;
3325 }
3326
3327 ifp = sc->bge_ifp;
3328
3329 /* Specify MTU. */
3330 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3331 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3332
3333 /* Load our MAC address. */
3334 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
3335 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3336 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3337
3338 /* Program promiscuous mode. */
3339 bge_setpromisc(sc);
3340
3341 /* Program multicast filter. */
3342 bge_setmulti(sc);
3343
3344 /* Init RX ring. */
3345 bge_init_rx_ring_std(sc);
3346
3347 /*
3348 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3349 * memory to insure that the chip has in fact read the first
3350 * entry of the ring.
3351 */
3352 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3353 uint32_t v, i;
3354 for (i = 0; i < 10; i++) {
3355 DELAY(20);
3356 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3357 if (v == (MCLBYTES - ETHER_ALIGN))
3358 break;
3359 }
3360 if (i == 10)
3361 device_printf (sc->bge_dev,
3362 "5705 A0 chip failed to load RX ring\n");
3363 }
3364
3365 /* Init jumbo RX ring. */
3366 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3367 bge_init_rx_ring_jumbo(sc);
3368
3369 /* Init our RX return ring index. */
3370 sc->bge_rx_saved_considx = 0;
3371
3028 sc->bge_tx_discards = cnt;
3029
3030#undef READ_STAT
3031}
3032
3033/*
3034 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3035 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3036 * but when such padded frames employ the bge IP/TCP checksum offload,
3037 * the hardware checksum assist gives incorrect results (possibly
3038 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3039 * If we pad such runts with zeros, the onboard checksum comes out correct.
3040 */
3041static __inline int
3042bge_cksum_pad(struct mbuf *m)
3043{
3044 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3045 struct mbuf *last;
3046
3047 /* If there's only the packet-header and we can pad there, use it. */
3048 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
3049 M_TRAILINGSPACE(m) >= padlen) {
3050 last = m;
3051 } else {
3052 /*
3053 * Walk packet chain to find last mbuf. We will either
3054 * pad there, or append a new mbuf and pad it.
3055 */
3056 for (last = m; last->m_next != NULL; last = last->m_next);
3057 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
3058 /* Allocate new empty mbuf, pad it. Compact later. */
3059 struct mbuf *n;
3060
3061 MGET(n, M_DONTWAIT, MT_DATA);
3062 if (n == NULL)
3063 return (ENOBUFS);
3064 n->m_len = 0;
3065 last->m_next = n;
3066 last = n;
3067 }
3068 }
3069
3070 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
3071 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3072 last->m_len += padlen;
3073 m->m_pkthdr.len += padlen;
3074
3075 return (0);
3076}
3077
3078/*
3079 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3080 * pointers to descriptors.
3081 */
3082static int
3083bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
3084{
3085 bus_dma_segment_t segs[BGE_NSEG_NEW];
3086 bus_dmamap_t map;
3087 struct bge_tx_bd *d;
3088 struct mbuf *m = *m_head;
3089 uint32_t idx = *txidx;
3090 uint16_t csum_flags;
3091 int nsegs, i, error;
3092
3093 csum_flags = 0;
3094 if (m->m_pkthdr.csum_flags) {
3095 if (m->m_pkthdr.csum_flags & CSUM_IP)
3096 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3097 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
3098 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3099 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
3100 (error = bge_cksum_pad(m)) != 0) {
3101 m_freem(m);
3102 *m_head = NULL;
3103 return (error);
3104 }
3105 }
3106 if (m->m_flags & M_LASTFRAG)
3107 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3108 else if (m->m_flags & M_FRAG)
3109 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3110 }
3111
3112 map = sc->bge_cdata.bge_tx_dmamap[idx];
3113 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs,
3114 &nsegs, BUS_DMA_NOWAIT);
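/*
 * EFBIG means the chain needs more DMA segments than the tag
 * allows; collapse it with m_defrag() and retry the load once.
 */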
3115 if (error == EFBIG) {
3116 m = m_defrag(m, M_DONTWAIT);
3117 if (m == NULL) {
3118 m_freem(*m_head);
3119 *m_head = NULL;
3120 return (ENOBUFS);
3121 }
3122 *m_head = m;
3123 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m,
3124 segs, &nsegs, BUS_DMA_NOWAIT);
3125 if (error) {
3126 m_freem(m);
3127 *m_head = NULL;
3128 return (error);
3129 }
3130 } else if (error != 0)
3131 return (error);
3132
3133 /*
3134 * Sanity check: avoid coming within 16 descriptors
3135 * of the end of the ring.
3136 */
3137 if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3138 bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
3139 return (ENOBUFS);
3140 }
3141
3142 bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
3143
3144 for (i = 0; ; i++) {
3145 d = &sc->bge_ldata.bge_tx_ring[idx];
3146 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3147 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3148 d->bge_len = segs[i].ds_len;
3149 d->bge_flags = csum_flags;
3150 if (i == nsegs - 1)
3151 break;
3152 BGE_INC(idx, BGE_TX_RING_CNT);
3153 }
3154
3155 /* Mark the last segment as end of packet... */
3156 d->bge_flags |= BGE_TXBDFLAG_END;
3157
3158 /* ... and put VLAN tag into first segment. */
3159 d = &sc->bge_ldata.bge_tx_ring[*txidx];
3160 if (m->m_flags & M_VLANTAG) {
3161 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3162 d->bge_vlan_tag = m->m_pkthdr.ether_vtag;
3163 } else
3164 d->bge_vlan_tag = 0;
3165
3166 /*
3167 * Ensure that the map for this transmission
3168 * is placed at the array index of the last descriptor
3169 * in this chain.
3170 */
3171 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3172 sc->bge_cdata.bge_tx_dmamap[idx] = map;
3173 sc->bge_cdata.bge_tx_chain[idx] = m;
3174 sc->bge_txcnt += nsegs;
3175
3176 BGE_INC(idx, BGE_TX_RING_CNT);
3177 *txidx = idx;
3178
3179 return (0);
3180}
3181
3182/*
3183 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3184 * to the mbuf data regions directly in the transmit descriptors.
3185 */
3186static void
3187bge_start_locked(struct ifnet *ifp)
3188{
3189 struct bge_softc *sc;
3190 struct mbuf *m_head = NULL;
3191 uint32_t prodidx;
3192 int count = 0;
3193
3194 sc = ifp->if_softc;
3195
3196 if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3197 return;
3198
3199 prodidx = sc->bge_tx_prodidx;
3200
3201 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3202 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3203 if (m_head == NULL)
3204 break;
3205
3206 /*
3207 * XXX
3208 * The code inside the if() block is never reached since we
3209 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3210 * requests to checksum TCP/UDP in a fragmented packet.
3211 *
3212 * XXX
3213 * safety overkill. If this is a fragmented packet chain
3214 * with delayed TCP/UDP checksums, then only encapsulate
3215 * it if we have enough descriptors to handle the entire
3216 * chain at once.
3217 * (paranoia -- may not actually be needed)
3218 */
3219 if (m_head->m_flags & M_FIRSTFRAG &&
3220 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3221 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3222 m_head->m_pkthdr.csum_data + 16) {
3223 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3224 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3225 break;
3226 }
3227 }
3228
3229 /*
3230 * Pack the data into the transmit ring. If we
3231 * don't have room, set the OACTIVE flag and wait
3232 * for the NIC to drain the ring.
3233 */
3234 if (bge_encap(sc, &m_head, &prodidx)) {
3235 if (m_head == NULL)
3236 break;
3237 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3238 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3239 break;
3240 }
3241 ++count;
3242
3243 /*
3244 * If there's a BPF listener, bounce a copy of this frame
3245 * to him.
3246 */
3247 BPF_MTAP(ifp, m_head);
3248 }
3249
3250 if (count == 0)
3251 /* No packets were dequeued. */
3252 return;
3253
3254 /* Transmit. */
3255 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3256 /* 5700 b2 errata */
3257 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3258 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3259
3260 sc->bge_tx_prodidx = prodidx;
3261
3262 /*
3263 * Set a timeout in case the chip goes out to lunch.
3264 */
3265 sc->bge_timer = 5;
3266}
3267
3268/*
3269 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3270 * to the mbuf data regions directly in the transmit descriptors.
3271 */
3272static void
3273bge_start(struct ifnet *ifp)
3274{
3275 struct bge_softc *sc;
3276
3277 sc = ifp->if_softc;
3278 BGE_LOCK(sc);
3279 bge_start_locked(ifp);
3280 BGE_UNLOCK(sc);
3281}
3282
3283static void
3284bge_init_locked(struct bge_softc *sc)
3285{
3286 struct ifnet *ifp;
3287 uint16_t *m;
3288
3289 BGE_LOCK_ASSERT(sc);
3290
3291 ifp = sc->bge_ifp;
3292
3293 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3294 return;
3295
3296 /* Cancel pending I/O and flush buffers. */
3297 bge_stop(sc);
3298
3299 bge_stop_fw(sc);
3300 bge_sig_pre_reset(sc, BGE_RESET_START);
3301 bge_reset(sc);
3302 bge_sig_legacy(sc, BGE_RESET_START);
3303 bge_sig_post_reset(sc, BGE_RESET_START);
3304
3305 bge_chipinit(sc);
3306
3307 /*
3308 * Init the various state machines, ring
3309 * control blocks and firmware.
3310 */
3311 if (bge_blockinit(sc)) {
3312 device_printf(sc->bge_dev, "initialization failure\n");
3313 return;
3314 }
3315
3316 ifp = sc->bge_ifp;
3317
3318 /* Specify MTU. */
3319 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3320 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3321
3322 /* Load our MAC address. */
3323 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
3324 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3325 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3326
3327 /* Program promiscuous mode. */
3328 bge_setpromisc(sc);
3329
3330 /* Program multicast filter. */
3331 bge_setmulti(sc);
3332
3333 /* Init RX ring. */
3334 bge_init_rx_ring_std(sc);
3335
3336 /*
3337 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3338 * memory to ensure that the chip has in fact read the first
3339 * entry of the ring.
3340 */
3341 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3342 uint32_t v, i;
3343 for (i = 0; i < 10; i++) {
3344 DELAY(20);
3345 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3346 if (v == (MCLBYTES - ETHER_ALIGN))
3347 break;
3348 }
3349 if (i == 10)
3350 device_printf (sc->bge_dev,
3351 "5705 A0 chip failed to load RX ring\n");
3352 }
3353
3354 /* Init jumbo RX ring. */
3355 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3356 bge_init_rx_ring_jumbo(sc);
3357
3358 /* Init our RX return ring index. */
3359 sc->bge_rx_saved_considx = 0;
3360
3361 /* Init our RX/TX stat counters. */
3362 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
3363
3372 /* Init TX ring. */
3373 bge_init_tx_ring(sc);
3374
3375 /* Turn on transmitter. */
3376 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3377
3378 /* Turn on receiver. */
3379 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3380
3381 /* Tell firmware we're alive. */
3382 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3383
3384#ifdef DEVICE_POLLING
3385 /* Disable interrupts if we are polling. */
3386 if (ifp->if_capenable & IFCAP_POLLING) {
3387 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3388 BGE_PCIMISCCTL_MASK_PCI_INTR);
3389 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3390 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3391 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3392 } else
3393#endif
3394
3395 /* Enable host interrupts. */
3396 {
3397 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3398 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3399 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3400 }
3401
3402 bge_ifmedia_upd_locked(ifp);
3403
3404 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3405 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3406
3407 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3408}
3409
3410static void
3411bge_init(void *xsc)
3412{
3413 struct bge_softc *sc = xsc;
3414
3415 BGE_LOCK(sc);
3416 bge_init_locked(sc);
3417 BGE_UNLOCK(sc);
3418}
3419
3420/*
3421 * Set media options.
3422 */
3423static int
3424bge_ifmedia_upd(struct ifnet *ifp)
3425{
3426 struct bge_softc *sc = ifp->if_softc;
3427 int res;
3428
3429 BGE_LOCK(sc);
3430 res = bge_ifmedia_upd_locked(ifp);
3431 BGE_UNLOCK(sc);
3432
3433 return (res);
3434}
3435
3436static int
3437bge_ifmedia_upd_locked(struct ifnet *ifp)
3438{
3439 struct bge_softc *sc = ifp->if_softc;
3440 struct mii_data *mii;
3441 struct ifmedia *ifm;
3442
3443 BGE_LOCK_ASSERT(sc);
3444
3445 ifm = &sc->bge_ifmedia;
3446
3447 /* If this is a 1000baseX NIC, enable the TBI port. */
3448 if (sc->bge_flags & BGE_FLAG_TBI) {
3449 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3450 return (EINVAL);
3451 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3452 case IFM_AUTO:
3453 /*
3454 * The BCM5704 ASIC appears to have a special
3455 * mechanism for programming the autoneg
3456 * advertisement registers in TBI mode.
3457 */
3458 if (bge_fake_autoneg == 0 &&
3459 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3460 uint32_t sgdig;
3461 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3462 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3463 sgdig |= BGE_SGDIGCFG_AUTO|
3464 BGE_SGDIGCFG_PAUSE_CAP|
3465 BGE_SGDIGCFG_ASYM_PAUSE;
3466 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3467 sgdig|BGE_SGDIGCFG_SEND);
3468 DELAY(5);
3469 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3470 }
3471 break;
3472 case IFM_1000_SX:
3473 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3474 BGE_CLRBIT(sc, BGE_MAC_MODE,
3475 BGE_MACMODE_HALF_DUPLEX);
3476 } else {
3477 BGE_SETBIT(sc, BGE_MAC_MODE,
3478 BGE_MACMODE_HALF_DUPLEX);
3479 }
3480 break;
3481 default:
3482 return (EINVAL);
3483 }
3484 return (0);
3485 }
3486
3487 sc->bge_link_evt++;
3488 mii = device_get_softc(sc->bge_miibus);
3489 if (mii->mii_instance) {
3490 struct mii_softc *miisc;
3491 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3492 miisc = LIST_NEXT(miisc, mii_list))
3493 mii_phy_reset(miisc);
3494 }
3495 mii_mediachg(mii);
3496
3497 return (0);
3498}
3499
3500/*
3501 * Report current media status.
3502 */
3503static void
3504bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3505{
3506 struct bge_softc *sc = ifp->if_softc;
3507 struct mii_data *mii;
3508
3509 BGE_LOCK(sc);
3510
3511 if (sc->bge_flags & BGE_FLAG_TBI) {
3512 ifmr->ifm_status = IFM_AVALID;
3513 ifmr->ifm_active = IFM_ETHER;
3514 if (CSR_READ_4(sc, BGE_MAC_STS) &
3515 BGE_MACSTAT_TBI_PCS_SYNCHED)
3516 ifmr->ifm_status |= IFM_ACTIVE;
3517 else {
3518 ifmr->ifm_active |= IFM_NONE;
3519 BGE_UNLOCK(sc);
3520 return;
3521 }
3522 ifmr->ifm_active |= IFM_1000_SX;
3523 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3524 ifmr->ifm_active |= IFM_HDX;
3525 else
3526 ifmr->ifm_active |= IFM_FDX;
3527 BGE_UNLOCK(sc);
3528 return;
3529 }
3530
3531 mii = device_get_softc(sc->bge_miibus);
3532 mii_pollstat(mii);
3533 ifmr->ifm_active = mii->mii_media_active;
3534 ifmr->ifm_status = mii->mii_media_status;
3535
3536 BGE_UNLOCK(sc);
3537}
3538
3539static int
3540bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3541{
3542 struct bge_softc *sc = ifp->if_softc;
3543 struct ifreq *ifr = (struct ifreq *) data;
3544 struct mii_data *mii;
3545 int flags, mask, error = 0;
3546
3547 switch (command) {
3548 case SIOCSIFMTU:
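/* Only jumbo-capable chips may go above the standard MTU. */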
3549 if (ifr->ifr_mtu < ETHERMIN ||
3550 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
3551 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
3552 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
3553 ifr->ifr_mtu > ETHERMTU))
3554 error = EINVAL;
3555 else if (ifp->if_mtu != ifr->ifr_mtu) {
3556 ifp->if_mtu = ifr->ifr_mtu;
3557 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3558 bge_init(sc);
3559 }
3560 break;
3561 case SIOCSIFFLAGS:
3562 BGE_LOCK(sc);
3563 if (ifp->if_flags & IFF_UP) {
3564 /*
3565 * If only the state of the PROMISC flag changed,
3566 * then just use the 'set promisc mode' command
3567 * instead of reinitializing the entire NIC. Doing
3568 * a full re-init means reloading the firmware and
3569 * waiting for it to start up, which may take a
3570 * second or two. Similarly for ALLMULTI.
3571 */
3572 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3573 flags = ifp->if_flags ^ sc->bge_if_flags;
3574 if (flags & IFF_PROMISC)
3575 bge_setpromisc(sc);
3576 if (flags & IFF_ALLMULTI)
3577 bge_setmulti(sc);
3578 } else
3579 bge_init_locked(sc);
3580 } else {
3581 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3582 bge_stop(sc);
3583 }
3584 }
3585 sc->bge_if_flags = ifp->if_flags;
3586 BGE_UNLOCK(sc);
3587 error = 0;
3588 break;
3589 case SIOCADDMULTI:
3590 case SIOCDELMULTI:
3591 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3592 BGE_LOCK(sc);
3593 bge_setmulti(sc);
3594 BGE_UNLOCK(sc);
3595 error = 0;
3596 }
3597 break;
3598 case SIOCSIFMEDIA:
3599 case SIOCGIFMEDIA:
3600 if (sc->bge_flags & BGE_FLAG_TBI) {
3601 error = ifmedia_ioctl(ifp, ifr,
3602 &sc->bge_ifmedia, command);
3603 } else {
3604 mii = device_get_softc(sc->bge_miibus);
3605 error = ifmedia_ioctl(ifp, ifr,
3606 &mii->mii_media, command);
3607 }
3608 break;
3609 case SIOCSIFCAP:
3610 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3611#ifdef DEVICE_POLLING
3612 if (mask & IFCAP_POLLING) {
3613 if (ifr->ifr_reqcap & IFCAP_POLLING) {
3614 error = ether_poll_register(bge_poll, ifp);
3615 if (error)
3616 return (error);
3617 BGE_LOCK(sc);
3618 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3619 BGE_PCIMISCCTL_MASK_PCI_INTR);
3620 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3621 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3622 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3623 ifp->if_capenable |= IFCAP_POLLING;
3624 BGE_UNLOCK(sc);
3625 } else {
3626 error = ether_poll_deregister(ifp);
3627 /* Enable interrupt even in error case */
3628 BGE_LOCK(sc);
3629 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
3630 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
3631 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3632 BGE_PCIMISCCTL_MASK_PCI_INTR);
3633 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3634 ifp->if_capenable &= ~IFCAP_POLLING;
3635 BGE_UNLOCK(sc);
3636 }
3637 }
3638#endif
3639 if (mask & IFCAP_HWCSUM) {
3640 ifp->if_capenable ^= IFCAP_HWCSUM;
3641 if (IFCAP_HWCSUM & ifp->if_capenable &&
3642 IFCAP_HWCSUM & ifp->if_capabilities)
3643 ifp->if_hwassist = BGE_CSUM_FEATURES;
3644 else
3645 ifp->if_hwassist = 0;
3646 VLAN_CAPABILITIES(ifp);
3647 }
3648 break;
3649 default:
3650 error = ether_ioctl(ifp, command, data);
3651 break;
3652 }
3653
3654 return (error);
3655}
3656
3657static void
3658bge_watchdog(struct bge_softc *sc)
3659{
3660 struct ifnet *ifp;
3661
3662 BGE_LOCK_ASSERT(sc);
3663
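/*
 * The timer is armed by the transmit path and cleared by bge_txeof();
 * only complain once it actually counts down to zero.
 */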
3664 if (sc->bge_timer == 0 || --sc->bge_timer)
3665 return;
3666
3667 ifp = sc->bge_ifp;
3668
3669 if_printf(ifp, "watchdog timeout -- resetting\n");
3670
3671 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3672 bge_init(sc);
3673
3674 ifp->if_oerrors++;
3675}
3676
3677/*
3678 * Stop the adapter and free any mbufs allocated to the
3679 * RX and TX lists.
3680 */
3681static void
3682bge_stop(struct bge_softc *sc)
3683{
3684 struct ifnet *ifp;
3685 struct ifmedia_entry *ifm;
3686 struct mii_data *mii = NULL;
3687 int mtmp, itmp;
3688
3689 BGE_LOCK_ASSERT(sc);
3690
3691 ifp = sc->bge_ifp;
3692
3693 if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
3694 mii = device_get_softc(sc->bge_miibus);
3695
3696 callout_stop(&sc->bge_stat_ch);
3697
3698 /*
3699 * Disable all of the receiver blocks.
3700 */
3701 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3702 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3703 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3704 if (!(BGE_IS_5705_OR_BEYOND(sc)))
3705 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3706 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3707 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3708 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3709
3710 /*
3711 * Disable all of the transmit blocks.
3712 */
3713 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3714 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3715 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3716 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3717 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3718 if (!(BGE_IS_5705_OR_BEYOND(sc)))
3719 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3720 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3721
3722 /*
3723 * Shut down all of the memory managers and related
3724 * state machines.
3725 */
3726 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3727 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3728 if (!(BGE_IS_5705_OR_BEYOND(sc)))
3729 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3730 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3731 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3732 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
3733 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3734 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3735 }
3736
3737 /* Disable host interrupts. */
3738 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3739 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3740
3741 /*
3742 * Tell firmware we're shutting down.
3743 */
3744
3745 bge_stop_fw(sc);
3746 bge_sig_pre_reset(sc, BGE_RESET_STOP);
3747 bge_reset(sc);
3748 bge_sig_legacy(sc, BGE_RESET_STOP);
3749 bge_sig_post_reset(sc, BGE_RESET_STOP);
3750
3751 /*
3752 * Keep the ASF firmware running if up.
3753 */
3754 if (sc->bge_asf_mode & ASF_STACKUP)
3755 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3756 else
3757 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3758
3759 /* Free the RX lists. */
3760 bge_free_rx_ring_std(sc);
3761
3762 /* Free jumbo RX list. */
3763 if (BGE_IS_JUMBO_CAPABLE(sc))
3764 bge_free_rx_ring_jumbo(sc);
3765
3766 /* Free TX buffers. */
3767 bge_free_tx_ring(sc);
3768
3769 /*
3770 * Isolate/power down the PHY, but leave the media selection
3771 * unchanged so that things will be put back to normal when
3772 * we bring the interface back up.
3773 */
3774 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3775 itmp = ifp->if_flags;
3776 ifp->if_flags |= IFF_UP;
3777 /*
3778 * If we are called from bge_detach(), mii is already NULL.
3779 */
3780 if (mii != NULL) {
3781 ifm = mii->mii_media.ifm_cur;
3782 mtmp = ifm->ifm_media;
3783 ifm->ifm_media = IFM_ETHER|IFM_NONE;
3784 mii_mediachg(mii);
3785 ifm->ifm_media = mtmp;
3786 }
3787 ifp->if_flags = itmp;
3788 }
3789
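/* Mark the saved TX consumer index as invalid. */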
3790 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3791
3792 /*
3793 * We can't just call bge_link_upd() because the chip is almost stopped,
3794 * so the bge_link_upd -> bge_tick_locked -> bge_stats_update sequence
3795 * may lead to a hardware deadlock. So we just clear the MAC's link state
3796 * (the PHY may still have link UP).
3797 */
3798 if (bootverbose && sc->bge_link)
3799 if_printf(sc->bge_ifp, "link DOWN\n");
3800 sc->bge_link = 0;
3801
3802 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3803}
3804
3805/*
3806 * Stop all chip I/O so that the kernel's probe routines don't
3807 * get confused by errant DMAs when rebooting.
3808 */
3809static void
3810bge_shutdown(device_t dev)
3811{
3812 struct bge_softc *sc;
3813
3814 sc = device_get_softc(dev);
3815
3816 BGE_LOCK(sc);
3817 bge_stop(sc);
3818 bge_reset(sc);
3819 BGE_UNLOCK(sc);
3820}
3821
3822static int
3823bge_suspend(device_t dev)
3824{
3825 struct bge_softc *sc;
3826
3827 sc = device_get_softc(dev);
3828 BGE_LOCK(sc);
3829 bge_stop(sc);
3830 BGE_UNLOCK(sc);
3831
3832 return (0);
3833}
3834
3835static int
3836bge_resume(device_t dev)
3837{
3838 struct bge_softc *sc;
3839 struct ifnet *ifp;
3840
3841 sc = device_get_softc(dev);
3842 BGE_LOCK(sc);
3843 ifp = sc->bge_ifp;
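/* If the interface was up when we suspended, bring it back up and restart transmission. */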
3844 if (ifp->if_flags & IFF_UP) {
3845 bge_init_locked(sc);
3846 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3847 bge_start_locked(ifp);
3848 }
3849 BGE_UNLOCK(sc);
3850
3851 return (0);
3852}
3853
3854static void
3855bge_link_upd(struct bge_softc *sc)
3856{
3857 struct mii_data *mii;
3858 uint32_t link, status;
3859
3860 BGE_LOCK_ASSERT(sc);
3861
3862 /* Clear 'pending link event' flag. */
3863 sc->bge_link_evt = 0;
3864
3865 /*
3866 * Process link state changes.
3867 * Grrr. The link status word in the status block does
3868 * not work correctly on the BCM5700 rev AX and BX chips,
3869 * according to all available information. Hence, we have
3870 * to enable MII interrupts in order to properly obtain
3871 * async link changes. Unfortunately, this also means that
3872 * we have to read the MAC status register to detect link
3873 * changes, thereby adding an additional register access to
3874 * the interrupt handler.
3875 *
3876 * XXX: perhaps the link state detection procedure used for
3877 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
3878 */
3879
3880 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3881 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
3882 status = CSR_READ_4(sc, BGE_MAC_STS);
3883 if (status & BGE_MACSTAT_MI_INTERRUPT) {
3884 callout_stop(&sc->bge_stat_ch);
3885 bge_tick(sc);
3886
3887 mii = device_get_softc(sc->bge_miibus);
3888 if (!sc->bge_link &&
3889 mii->mii_media_status & IFM_ACTIVE &&
3890 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3891 sc->bge_link++;
3892 if (bootverbose)
3893 if_printf(sc->bge_ifp, "link UP\n");
3894 } else if (sc->bge_link &&
3895 (!(mii->mii_media_status & IFM_ACTIVE) ||
3896 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3897 sc->bge_link = 0;
3898 if (bootverbose)
3899 if_printf(sc->bge_ifp, "link DOWN\n");
3900 }
3901
3902 /* Clear the interrupt. */
3903 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3904 BGE_EVTENB_MI_INTERRUPT);
3905 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3906 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
3907 BRGPHY_INTRS);
3908 }
3909 return;
3910 }
3911
3912 if (sc->bge_flags & BGE_FLAG_TBI) {
3913 status = CSR_READ_4(sc, BGE_MAC_STS);
3914 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3915 if (!sc->bge_link) {
3916 sc->bge_link++;
3917 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3918 BGE_CLRBIT(sc, BGE_MAC_MODE,
3919 BGE_MACMODE_TBI_SEND_CFGS);
3920 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3921 if (bootverbose)
3922 if_printf(sc->bge_ifp, "link UP\n");
3923 if_link_state_change(sc->bge_ifp,
3924 LINK_STATE_UP);
3925 }
3926 } else if (sc->bge_link) {
3927 sc->bge_link = 0;
3928 if (bootverbose)
3929 if_printf(sc->bge_ifp, "link DOWN\n");
3930 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
3931 }
3932	/* Discard link events for MII/GMII cards if MI auto-polling is disabled. */
3933 } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3934 /*
3935	 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED bit
3936	 * in the status word always set.  Work around this bug by reading
3937	 * the PHY link status directly.
3938 */
3939 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
3940
3941 if (link != sc->bge_link ||
3942 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
3943 callout_stop(&sc->bge_stat_ch);
3944 bge_tick(sc);
3945
3946 mii = device_get_softc(sc->bge_miibus);
3947 if (!sc->bge_link &&
3948 mii->mii_media_status & IFM_ACTIVE &&
3949 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3950 sc->bge_link++;
3951 if (bootverbose)
3952 if_printf(sc->bge_ifp, "link UP\n");
3953 } else if (sc->bge_link &&
3954 (!(mii->mii_media_status & IFM_ACTIVE) ||
3955 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3956 sc->bge_link = 0;
3957 if (bootverbose)
3958 if_printf(sc->bge_ifp, "link DOWN\n");
3959 }
3960 }
3961 }
3962
3963 /* Clear the attention. */
3964 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3965 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3966 BGE_MACSTAT_LINK_CHANGED);
3967}
3364 /* Init TX ring. */
3365 bge_init_tx_ring(sc);
3366
3367 /* Turn on transmitter. */
3368 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3369
3370 /* Turn on receiver. */
3371 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3372
3373 /* Tell firmware we're alive. */
3374 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3375
3376#ifdef DEVICE_POLLING
3377 /* Disable interrupts if we are polling. */
3378 if (ifp->if_capenable & IFCAP_POLLING) {
3379 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3380 BGE_PCIMISCCTL_MASK_PCI_INTR);
3381 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3382 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3383 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3384 } else
3385#endif
3386
3387 /* Enable host interrupts. */
3388 {
3389 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3390 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3391 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3392 }
3393
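	/*
	 * Program the currently selected media.  For copper PHYs this also
	 * marks a link event pending so the link state is re-evaluated.
	 */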
3394 bge_ifmedia_upd_locked(ifp);
3395
3396 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3397 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3398
3399 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3400}
3401
3402static void
3403bge_init(void *xsc)
3404{
3405 struct bge_softc *sc = xsc;
3406
3407 BGE_LOCK(sc);
3408 bge_init_locked(sc);
3409 BGE_UNLOCK(sc);
3410}
3411
3412/*
3413 * Set media options.
3414 */
3415static int
3416bge_ifmedia_upd(struct ifnet *ifp)
3417{
3418 struct bge_softc *sc = ifp->if_softc;
3419 int res;
3420
3421 BGE_LOCK(sc);
3422 res = bge_ifmedia_upd_locked(ifp);
3423 BGE_UNLOCK(sc);
3424
3425 return (res);
3426}
3427
3428static int
3429bge_ifmedia_upd_locked(struct ifnet *ifp)
3430{
3431 struct bge_softc *sc = ifp->if_softc;
3432 struct mii_data *mii;
3433 struct ifmedia *ifm;
3434
3435 BGE_LOCK_ASSERT(sc);
3436
3437 ifm = &sc->bge_ifmedia;
3438
3439 /* If this is a 1000baseX NIC, enable the TBI port. */
3440 if (sc->bge_flags & BGE_FLAG_TBI) {
3441 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3442 return (EINVAL);
3443 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3444 case IFM_AUTO:
3445 /*
3446 * The BCM5704 ASIC appears to have a special
3447 * mechanism for programming the autoneg
3448 * advertisement registers in TBI mode.
3449 */
3450 if (bge_fake_autoneg == 0 &&
3451 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3452 uint32_t sgdig;
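				/*
				 * Advertise autoneg plus pause capabilities,
				 * then pulse the SEND bit, which apparently
				 * commits the new advertisement.
				 */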
3453 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3454 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3455 sgdig |= BGE_SGDIGCFG_AUTO|
3456 BGE_SGDIGCFG_PAUSE_CAP|
3457 BGE_SGDIGCFG_ASYM_PAUSE;
3458 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3459 sgdig|BGE_SGDIGCFG_SEND);
3460 DELAY(5);
3461 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3462 }
3463 break;
3464 case IFM_1000_SX:
3465 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3466 BGE_CLRBIT(sc, BGE_MAC_MODE,
3467 BGE_MACMODE_HALF_DUPLEX);
3468 } else {
3469 BGE_SETBIT(sc, BGE_MAC_MODE,
3470 BGE_MACMODE_HALF_DUPLEX);
3471 }
3472 break;
3473 default:
3474 return (EINVAL);
3475 }
3476 return (0);
3477 }
3478
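	/*
	 * Copper (MII) case: note a pending link event, reset the PHY(s)
	 * and let the MII layer program the new media.
	 */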
3479 sc->bge_link_evt++;
3480 mii = device_get_softc(sc->bge_miibus);
3481 if (mii->mii_instance) {
3482 struct mii_softc *miisc;
3483 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3484 miisc = LIST_NEXT(miisc, mii_list))
3485 mii_phy_reset(miisc);
3486 }
3487 mii_mediachg(mii);
3488
3489 return (0);
3490}
3491
3492/*
3493 * Report current media status.
3494 */
3495static void
3496bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3497{
3498 struct bge_softc *sc = ifp->if_softc;
3499 struct mii_data *mii;
3500
3501 BGE_LOCK(sc);
3502
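	/*
	 * For TBI (fiber) interfaces the MAC status register is the only
	 * link indicator; otherwise query the PHY through the MII layer.
	 */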
3503 if (sc->bge_flags & BGE_FLAG_TBI) {
3504 ifmr->ifm_status = IFM_AVALID;
3505 ifmr->ifm_active = IFM_ETHER;
3506 if (CSR_READ_4(sc, BGE_MAC_STS) &
3507 BGE_MACSTAT_TBI_PCS_SYNCHED)
3508 ifmr->ifm_status |= IFM_ACTIVE;
3509 else {
3510 ifmr->ifm_active |= IFM_NONE;
3511 BGE_UNLOCK(sc);
3512 return;
3513 }
3514 ifmr->ifm_active |= IFM_1000_SX;
3515 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3516 ifmr->ifm_active |= IFM_HDX;
3517 else
3518 ifmr->ifm_active |= IFM_FDX;
3519 BGE_UNLOCK(sc);
3520 return;
3521 }
3522
3523 mii = device_get_softc(sc->bge_miibus);
3524 mii_pollstat(mii);
3525 ifmr->ifm_active = mii->mii_media_active;
3526 ifmr->ifm_status = mii->mii_media_status;
3527
3528 BGE_UNLOCK(sc);
3529}
3530
3531static int
3532bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3533{
3534 struct bge_softc *sc = ifp->if_softc;
3535 struct ifreq *ifr = (struct ifreq *) data;
3536 struct mii_data *mii;
3537 int flags, mask, error = 0;
3538
3539 switch (command) {
3540 case SIOCSIFMTU:
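		/*
		 * Reject MTUs below ETHERMIN or above the chip's limit
		 * (BGE_JUMBO_MTU only on jumbo-capable controllers); an
		 * accepted change forces a reinit so it takes effect.
		 */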
3541 if (ifr->ifr_mtu < ETHERMIN ||
3542 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
3543 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
3544 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
3545 ifr->ifr_mtu > ETHERMTU))
3546 error = EINVAL;
3547 else if (ifp->if_mtu != ifr->ifr_mtu) {
3548 ifp->if_mtu = ifr->ifr_mtu;
3549 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3550 bge_init(sc);
3551 }
3552 break;
3553 case SIOCSIFFLAGS:
3554 BGE_LOCK(sc);
3555 if (ifp->if_flags & IFF_UP) {
3556 /*
3557 * If only the state of the PROMISC flag changed,
3558 * then just use the 'set promisc mode' command
3559 * instead of reinitializing the entire NIC. Doing
3560 * a full re-init means reloading the firmware and
3561 * waiting for it to start up, which may take a
3562 * second or two. Similarly for ALLMULTI.
3563 */
3564 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3565 flags = ifp->if_flags ^ sc->bge_if_flags;
3566 if (flags & IFF_PROMISC)
3567 bge_setpromisc(sc);
3568 if (flags & IFF_ALLMULTI)
3569 bge_setmulti(sc);
3570 } else
3571 bge_init_locked(sc);
3572 } else {
3573 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3574 bge_stop(sc);
3575 }
3576 }
3577 sc->bge_if_flags = ifp->if_flags;
3578 BGE_UNLOCK(sc);
3579 error = 0;
3580 break;
3581 case SIOCADDMULTI:
3582 case SIOCDELMULTI:
3583 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3584 BGE_LOCK(sc);
3585 bge_setmulti(sc);
3586 BGE_UNLOCK(sc);
3587 error = 0;
3588 }
3589 break;
3590 case SIOCSIFMEDIA:
3591 case SIOCGIFMEDIA:
3592 if (sc->bge_flags & BGE_FLAG_TBI) {
3593 error = ifmedia_ioctl(ifp, ifr,
3594 &sc->bge_ifmedia, command);
3595 } else {
3596 mii = device_get_softc(sc->bge_miibus);
3597 error = ifmedia_ioctl(ifp, ifr,
3598 &mii->mii_media, command);
3599 }
3600 break;
3601 case SIOCSIFCAP:
3602 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3603#ifdef DEVICE_POLLING
3604 if (mask & IFCAP_POLLING) {
3605 if (ifr->ifr_reqcap & IFCAP_POLLING) {
3606 error = ether_poll_register(bge_poll, ifp);
3607 if (error)
3608 return (error);
3609 BGE_LOCK(sc);
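				/*
				 * Mask the chip interrupt and drop the
				 * coalescing thresholds to 1, mirroring what
				 * bge_init_locked() does when polling is on.
				 */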
3610 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3611 BGE_PCIMISCCTL_MASK_PCI_INTR);
3612 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3613 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3614 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3615 ifp->if_capenable |= IFCAP_POLLING;
3616 BGE_UNLOCK(sc);
3617 } else {
3618 error = ether_poll_deregister(ifp);
3619				/* Re-enable interrupts even in the error case. */
3620 BGE_LOCK(sc);
3621 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
3622 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
3623 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3624 BGE_PCIMISCCTL_MASK_PCI_INTR);
3625 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3626 ifp->if_capenable &= ~IFCAP_POLLING;
3627 BGE_UNLOCK(sc);
3628 }
3629 }
3630#endif
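		/*
		 * Toggle TX/RX checksum offload; if_hwassist follows the new
		 * capenable setting.
		 */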
3631 if (mask & IFCAP_HWCSUM) {
3632 ifp->if_capenable ^= IFCAP_HWCSUM;
3633 if (IFCAP_HWCSUM & ifp->if_capenable &&
3634 IFCAP_HWCSUM & ifp->if_capabilities)
3635 ifp->if_hwassist = BGE_CSUM_FEATURES;
3636 else
3637 ifp->if_hwassist = 0;
3638 VLAN_CAPABILITIES(ifp);
3639 }
3640 break;
3641 default:
3642 error = ether_ioctl(ifp, command, data);
3643 break;
3644 }
3645
3646 return (error);
3647}
3648
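/*
 * Watchdog handler.  If bge_timer was armed and has now expired, assume the
 * chip is wedged: log the timeout, reinitialize the interface and count an
 * output error.
 */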
3649static void
3650bge_watchdog(struct bge_softc *sc)
3651{
3652 struct ifnet *ifp;
3653
3654 BGE_LOCK_ASSERT(sc);
3655
3656 if (sc->bge_timer == 0 || --sc->bge_timer)
3657 return;
3658
3659 ifp = sc->bge_ifp;
3660
3661 if_printf(ifp, "watchdog timeout -- resetting\n");
3662
3663 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3664 bge_init(sc);
3665
3666 ifp->if_oerrors++;
3667}
3668
3669/*
3670 * Stop the adapter and free any mbufs allocated to the
3671 * RX and TX lists.
3672 */
3673static void
3674bge_stop(struct bge_softc *sc)
3675{
3676 struct ifnet *ifp;
3677 struct ifmedia_entry *ifm;
3678 struct mii_data *mii = NULL;
3679 int mtmp, itmp;
3680
3681 BGE_LOCK_ASSERT(sc);
3682
3683 ifp = sc->bge_ifp;
3684
3685 if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
3686 mii = device_get_softc(sc->bge_miibus);
3687
3688 callout_stop(&sc->bge_stat_ch);
3689
3690 /*
3691 * Disable all of the receiver blocks.
3692 */
3693 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3694 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3695 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3696 if (!(BGE_IS_5705_OR_BEYOND(sc)))
3697 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3698 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3699 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3700 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3701
3702 /*
3703 * Disable all of the transmit blocks.
3704 */
3705 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3706 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3707 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3708 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3709 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3710 if (!(BGE_IS_5705_OR_BEYOND(sc)))
3711 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3712 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3713
3714 /*
3715 * Shut down all of the memory managers and related
3716 * state machines.
3717 */
3718 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3719 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3720 if (!(BGE_IS_5705_OR_BEYOND(sc)))
3721 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3722 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3723 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3724 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
3725 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3726 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3727 }
3728
3729 /* Disable host interrupts. */
3730 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3731 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3732
3733 /*
3734 * Tell firmware we're shutting down.
3735 */
3736
3737 bge_stop_fw(sc);
3738 bge_sig_pre_reset(sc, BGE_RESET_STOP);
3739 bge_reset(sc);
3740 bge_sig_legacy(sc, BGE_RESET_STOP);
3741 bge_sig_post_reset(sc, BGE_RESET_STOP);
3742
3743 /*
3744 * Keep the ASF firmware running if up.
3745 */
3746 if (sc->bge_asf_mode & ASF_STACKUP)
3747 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3748 else
3749 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3750
3751 /* Free the RX lists. */
3752 bge_free_rx_ring_std(sc);
3753
3754 /* Free jumbo RX list. */
3755 if (BGE_IS_JUMBO_CAPABLE(sc))
3756 bge_free_rx_ring_jumbo(sc);
3757
3758 /* Free TX buffers. */
3759 bge_free_tx_ring(sc);
3760
3761 /*
3762 * Isolate/power down the PHY, but leave the media selection
3763 * unchanged so that things will be put back to normal when
3764 * we bring the interface back up.
3765 */
3766 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3767 itmp = ifp->if_flags;
3768 ifp->if_flags |= IFF_UP;
3769 /*
3770 * If we are called from bge_detach(), mii is already NULL.
3771 */
3772 if (mii != NULL) {
3773 ifm = mii->mii_media.ifm_cur;
3774 mtmp = ifm->ifm_media;
3775 ifm->ifm_media = IFM_ETHER|IFM_NONE;
3776 mii_mediachg(mii);
3777 ifm->ifm_media = mtmp;
3778 }
3779 ifp->if_flags = itmp;
3780 }
3781
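	/* The TX ring has just been freed, so forget the saved consumer index. */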
3782 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3783
3784 /*
3785	 * We can't just call bge_link_upd() because the chip is almost stopped;
3786	 * the bge_link_upd -> bge_tick_locked -> bge_stats_update sequence could
3787	 * lead to a hardware deadlock.  So we just clear the MAC's link state
3788	 * (the PHY may still have link UP).
3789 */
3790 if (bootverbose && sc->bge_link)
3791 if_printf(sc->bge_ifp, "link DOWN\n");
3792 sc->bge_link = 0;
3793
3794 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3795}
3796
3797/*
3798 * Stop all chip I/O so that the kernel's probe routines don't
3799 * get confused by errant DMAs when rebooting.
3800 */
3801static void
3802bge_shutdown(device_t dev)
3803{
3804 struct bge_softc *sc;
3805
3806 sc = device_get_softc(dev);
3807
3808 BGE_LOCK(sc);
3809 bge_stop(sc);
3810 bge_reset(sc);
3811 BGE_UNLOCK(sc);
3812}
3813
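/*
 * Suspend/resume support: stop the chip on suspend; on resume, reinitialize
 * only if the interface was up and restart any pending transmission.
 */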
3814static int
3815bge_suspend(device_t dev)
3816{
3817 struct bge_softc *sc;
3818
3819 sc = device_get_softc(dev);
3820 BGE_LOCK(sc);
3821 bge_stop(sc);
3822 BGE_UNLOCK(sc);
3823
3824 return (0);
3825}
3826
3827static int
3828bge_resume(device_t dev)
3829{
3830 struct bge_softc *sc;
3831 struct ifnet *ifp;
3832
3833 sc = device_get_softc(dev);
3834 BGE_LOCK(sc);
3835 ifp = sc->bge_ifp;
3836 if (ifp->if_flags & IFF_UP) {
3837 bge_init_locked(sc);
3838 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3839 bge_start_locked(ifp);
3840 }
3841 BGE_UNLOCK(sc);
3842
3843 return (0);
3844}
3845
3846static void
3847bge_link_upd(struct bge_softc *sc)
3848{
3849 struct mii_data *mii;
3850 uint32_t link, status;
3851
3852 BGE_LOCK_ASSERT(sc);
3853
3854 /* Clear 'pending link event' flag. */
3855 sc->bge_link_evt = 0;
3856
3857 /*
3858 * Process link state changes.
3859 * Grrr. The link status word in the status block does
3860 * not work correctly on the BCM5700 rev AX and BX chips,
3861 * according to all available information. Hence, we have
3862 * to enable MII interrupts in order to properly obtain
3863 * async link changes. Unfortunately, this also means that
3864 * we have to read the MAC status register to detect link
3865 * changes, thereby adding an additional register access to
3866 * the interrupt handler.
3867 *
3868 * XXX: perhaps link state detection procedure used for
3869	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
3870 */
3871
3872 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3873 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
3874 status = CSR_READ_4(sc, BGE_MAC_STS);
3875 if (status & BGE_MACSTAT_MI_INTERRUPT) {
3876 callout_stop(&sc->bge_stat_ch);
3877 bge_tick(sc);
3878
3879 mii = device_get_softc(sc->bge_miibus);
3880 if (!sc->bge_link &&
3881 mii->mii_media_status & IFM_ACTIVE &&
3882 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3883 sc->bge_link++;
3884 if (bootverbose)
3885 if_printf(sc->bge_ifp, "link UP\n");
3886 } else if (sc->bge_link &&
3887 (!(mii->mii_media_status & IFM_ACTIVE) ||
3888 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3889 sc->bge_link = 0;
3890 if (bootverbose)
3891 if_printf(sc->bge_ifp, "link DOWN\n");
3892 }
3893
3894 /* Clear the interrupt. */
3895 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3896 BGE_EVTENB_MI_INTERRUPT);
3897 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3898 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
3899 BRGPHY_INTRS);
3900 }
3901 return;
3902 }
3903
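	/*
	 * TBI (fiber) case: the PCS sync bit in the MAC status register
	 * tells us whether the link is up.
	 */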
3904 if (sc->bge_flags & BGE_FLAG_TBI) {
3905 status = CSR_READ_4(sc, BGE_MAC_STS);
3906 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3907 if (!sc->bge_link) {
3908 sc->bge_link++;
3909 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3910 BGE_CLRBIT(sc, BGE_MAC_MODE,
3911 BGE_MACMODE_TBI_SEND_CFGS);
3912 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3913 if (bootverbose)
3914 if_printf(sc->bge_ifp, "link UP\n");
3915 if_link_state_change(sc->bge_ifp,
3916 LINK_STATE_UP);
3917 }
3918 } else if (sc->bge_link) {
3919 sc->bge_link = 0;
3920 if (bootverbose)
3921 if_printf(sc->bge_ifp, "link DOWN\n");
3922 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
3923 }
3924	/* Discard link events for MII/GMII cards if MI auto-polling is disabled. */
3925 } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3926 /*
3927	 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED bit
3928	 * in the status word always set.  Work around this bug by reading
3929	 * the PHY link status directly.
3930 */
3931 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
3932
3933 if (link != sc->bge_link ||
3934 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
3935 callout_stop(&sc->bge_stat_ch);
3936 bge_tick(sc);
3937
3938 mii = device_get_softc(sc->bge_miibus);
3939 if (!sc->bge_link &&
3940 mii->mii_media_status & IFM_ACTIVE &&
3941 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3942 sc->bge_link++;
3943 if (bootverbose)
3944 if_printf(sc->bge_ifp, "link UP\n");
3945 } else if (sc->bge_link &&
3946 (!(mii->mii_media_status & IFM_ACTIVE) ||
3947 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3948 sc->bge_link = 0;
3949 if (bootverbose)
3950 if_printf(sc->bge_ifp, "link DOWN\n");
3951 }
3952 }
3953 }
3954
3955 /* Clear the attention. */
3956 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3957 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3958 BGE_MACSTAT_LINK_CHANGED);
3959}