if_bge.c (226871)
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 226871 2011-10-28 01:04:40Z yongari $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
 136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} const bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 },
219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 },
220 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
221 { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 },
222 { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 },
223 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
224 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
225 { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 },
226 { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 },
227
228 { SK_VENDORID, SK_DEVICEID_ALTIMA },
229
230 { TC_VENDORID, TC_DEVICEID_3C996 },
231
232 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
233 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
234 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
235
236 { 0, 0 }
237};
238
239static const struct bge_vendor {
240 uint16_t v_id;
241 const char *v_name;
242} const bge_vendors[] = {
243 { ALTEON_VENDORID, "Alteon" },
244 { ALTIMA_VENDORID, "Altima" },
245 { APPLE_VENDORID, "Apple" },
246 { BCOM_VENDORID, "Broadcom" },
247 { SK_VENDORID, "SysKonnect" },
248 { TC_VENDORID, "3Com" },
249 { FJTSU_VENDORID, "Fujitsu" },
250
251 { 0, NULL }
252};
253
254static const struct bge_revision {
255 uint32_t br_chipid;
256 const char *br_name;
257} const bge_revisions[] = {
258 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
259 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
260 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
261 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
262 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
263 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
264 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
265 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
266 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
267 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
268 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
269 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
270 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
271 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
272 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
273 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
274 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
275 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
276 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
277 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
278 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
279 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
280 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
281 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
282 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
283 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
284 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
285 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
286 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
287 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
288 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
289 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
290 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
291 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
292 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
293 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
294 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
295 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
296 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
297 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
298 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
299 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
300 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
301 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
302 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
303 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
304 { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
305 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
306 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
307 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
308 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
309 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
310 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
311 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
312 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
313 /* 5754 and 5787 share the same ASIC ID */
314 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
315 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
316 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
317 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
318 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
319 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
320 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
321 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
322 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
323
324 { 0, NULL }
325};
326
327/*
328 * Some defaults for major revisions, so that newer steppings
329 * that we don't know about have a shot at working.
330 */
331static const struct bge_revision const bge_majorrevs[] = {
332 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
333 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
334 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
335 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
336 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
337 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
338 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
339 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
340 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
341 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
342 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
343 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
344 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
345 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
346 /* 5754 and 5787 share the same ASIC ID */
347 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
348 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
349 { BGE_ASICREV_BCM57765, "unknown BCM57765" },
350 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
351 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
352 { BGE_ASICREV_BCM5719, "unknown BCM5719" },
353 { BGE_ASICREV_BCM5720, "unknown BCM5720" },
354
355 { 0, NULL }
356};
357
358#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
359#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
360#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
361#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
362#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
363#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
364#define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
365
366const struct bge_revision * bge_lookup_rev(uint32_t);
367const struct bge_vendor * bge_lookup_vendor(uint16_t);
368
369typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
370
371static int bge_probe(device_t);
372static int bge_attach(device_t);
373static int bge_detach(device_t);
374static int bge_suspend(device_t);
375static int bge_resume(device_t);
376static void bge_release_resources(struct bge_softc *);
377static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
378static int bge_dma_alloc(struct bge_softc *);
379static void bge_dma_free(struct bge_softc *);
380static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
381 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
382
383static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
384static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
385static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
386static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
387static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
388
389static void bge_txeof(struct bge_softc *, uint16_t);
390static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
391static int bge_rxeof(struct bge_softc *, uint16_t, int);
392
393static void bge_asf_driver_up (struct bge_softc *);
394static void bge_tick(void *);
395static void bge_stats_clear_regs(struct bge_softc *);
396static void bge_stats_update(struct bge_softc *);
397static void bge_stats_update_regs(struct bge_softc *);
398static struct mbuf *bge_check_short_dma(struct mbuf *);
399static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
400 uint16_t *, uint16_t *);
401static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
402
403static void bge_intr(void *);
404static int bge_msi_intr(void *);
405static void bge_intr_task(void *, int);
406static void bge_start_locked(struct ifnet *);
407static void bge_start(struct ifnet *);
408static int bge_ioctl(struct ifnet *, u_long, caddr_t);
409static void bge_init_locked(struct bge_softc *);
410static void bge_init(void *);
411static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
412static void bge_stop(struct bge_softc *);
413static void bge_watchdog(struct bge_softc *);
414static int bge_shutdown(device_t);
415static int bge_ifmedia_upd_locked(struct ifnet *);
416static int bge_ifmedia_upd(struct ifnet *);
417static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
418
419static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
420static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
421
422static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
423static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
424
425static void bge_setpromisc(struct bge_softc *);
426static void bge_setmulti(struct bge_softc *);
427static void bge_setvlan(struct bge_softc *);
428
429static __inline void bge_rxreuse_std(struct bge_softc *, int);
430static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
431static int bge_newbuf_std(struct bge_softc *, int);
432static int bge_newbuf_jumbo(struct bge_softc *, int);
433static int bge_init_rx_ring_std(struct bge_softc *);
434static void bge_free_rx_ring_std(struct bge_softc *);
435static int bge_init_rx_ring_jumbo(struct bge_softc *);
436static void bge_free_rx_ring_jumbo(struct bge_softc *);
437static void bge_free_tx_ring(struct bge_softc *);
438static int bge_init_tx_ring(struct bge_softc *);
439
440static int bge_chipinit(struct bge_softc *);
441static int bge_blockinit(struct bge_softc *);
442static uint32_t bge_dma_swap_options(struct bge_softc *);
440
441static int bge_has_eaddr(struct bge_softc *);
442static uint32_t bge_readmem_ind(struct bge_softc *, int);
443static void bge_writemem_ind(struct bge_softc *, int, int);
444static void bge_writembx(struct bge_softc *, int, int);
445#ifdef notdef
446static uint32_t bge_readreg_ind(struct bge_softc *, int);
447#endif
448static void bge_writemem_direct(struct bge_softc *, int, int);
449static void bge_writereg_ind(struct bge_softc *, int, int);
450
451static int bge_miibus_readreg(device_t, int, int);
452static int bge_miibus_writereg(device_t, int, int, int);
453static void bge_miibus_statchg(device_t);
454#ifdef DEVICE_POLLING
455static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
456#endif
457
458#define BGE_RESET_START 1
459#define BGE_RESET_STOP 2
460static void bge_sig_post_reset(struct bge_softc *, int);
461static void bge_sig_legacy(struct bge_softc *, int);
462static void bge_sig_pre_reset(struct bge_softc *, int);
463static void bge_stop_fw(struct bge_softc *);
464static int bge_reset(struct bge_softc *);
465static void bge_link_upd(struct bge_softc *);
466
467/*
468 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
469 * leak information to untrusted users. It is also known to cause alignment
470 * traps on certain architectures.
471 */
472#ifdef BGE_REGISTER_DEBUG
473static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
474static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
475static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
476#endif
477static void bge_add_sysctls(struct bge_softc *);
478static void bge_add_sysctl_stats_regs(struct bge_softc *,
479 struct sysctl_ctx_list *, struct sysctl_oid_list *);
480static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
481 struct sysctl_oid_list *);
482static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
483
484static device_method_t bge_methods[] = {
485 /* Device interface */
486 DEVMETHOD(device_probe, bge_probe),
487 DEVMETHOD(device_attach, bge_attach),
488 DEVMETHOD(device_detach, bge_detach),
489 DEVMETHOD(device_shutdown, bge_shutdown),
490 DEVMETHOD(device_suspend, bge_suspend),
491 DEVMETHOD(device_resume, bge_resume),
492
493 /* bus interface */
494 DEVMETHOD(bus_print_child, bus_generic_print_child),
495 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
496
497 /* MII interface */
498 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
499 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
500 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
501
502 { 0, 0 }
503};
504
505static driver_t bge_driver = {
506 "bge",
507 bge_methods,
508 sizeof(struct bge_softc)
509};
510
511static devclass_t bge_devclass;
512
513DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
514DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
515
516static int bge_allow_asf = 1;
517
518TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
519
520SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
521SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
522 "Allow ASF mode if available");
523
524#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
525#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
526#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
527#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
528#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
529
530static int
531bge_has_eaddr(struct bge_softc *sc)
532{
533#ifdef __sparc64__
534 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
535 device_t dev;
536 uint32_t subvendor;
537
538 dev = sc->bge_dev;
539
540 /*
541 * The on-board BGEs found in sun4u machines aren't fitted with
542 * an EEPROM which means that we have to obtain the MAC address
543 * via OFW and that some tests will always fail. We distinguish
544 * such BGEs by the subvendor ID, which also has to be obtained
545 * from OFW instead of the PCI configuration space as the latter
546 * indicates Broadcom as the subvendor of the netboot interface.
547 * For early Blade 1500 and 2500 we even have to check the OFW
548 * device path as the subvendor ID always defaults to Broadcom
549 * there.
550 */
551 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
552 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
553 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
554 return (0);
555 memset(buf, 0, sizeof(buf));
556 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
557 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
558 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
559 return (0);
560 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
561 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
562 return (0);
563 }
564#endif
565 return (1);
566}
567
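/*
 * The next two functions access NIC-local memory indirectly through the
 * PCI memory window: the target offset is loaded into the window base
 * address register, the data register is read or written, and the window
 * is then restored to zero.  On the BCM5906 the statistics block range is
 * not accessible this way, so such accesses are simply skipped.
 */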
568static uint32_t
569bge_readmem_ind(struct bge_softc *sc, int off)
570{
571 device_t dev;
572 uint32_t val;
573
574 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
575 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
576 return (0);
577
578 dev = sc->bge_dev;
579
580 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
581 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
582 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
583 return (val);
584}
585
586static void
587bge_writemem_ind(struct bge_softc *sc, int off, int val)
588{
589 device_t dev;
590
591 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
592 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
593 return;
594
595 dev = sc->bge_dev;
596
597 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
598 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
599 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
600}
601
602#ifdef notdef
603static uint32_t
604bge_readreg_ind(struct bge_softc *sc, int off)
605{
606 device_t dev;
607
608 dev = sc->bge_dev;
609
610 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
611 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
612}
613#endif
614
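/*
 * Write a NIC register indirectly through the PCI register base
 * address/data pair in configuration space rather than through the
 * memory-mapped BAR.
 */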
615static void
616bge_writereg_ind(struct bge_softc *sc, int off, int val)
617{
618 device_t dev;
619
620 dev = sc->bge_dev;
621
622 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
623 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
624}
625
626static void
627bge_writemem_direct(struct bge_softc *sc, int off, int val)
628{
629 CSR_WRITE_4(sc, off, val);
630}
631
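/*
 * Write a mailbox register.  On the BCM5906 the offset is first rebased
 * into the alternate (BGE_LPMBX_*) mailbox range.
 */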
632static void
633bge_writembx(struct bge_softc *sc, int off, int val)
634{
635 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
636 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
637
638 CSR_WRITE_4(sc, off, val);
639}
640
641/*
642 * Map a single buffer address.
643 */
644
645static void
646bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
647{
648 struct bge_dmamap_arg *ctx;
649
650 if (error)
651 return;
652
653 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
654
655 ctx = arg;
656 ctx->bge_busaddr = segs->ds_addr;
657}
658
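/*
 * Read a single byte from NVRAM: grab the software arbitration lock,
 * enable NVRAM access, issue a read command for the enclosing 32-bit
 * word, extract the addressed byte, then restore the access bits and
 * release the lock.  Returns non-zero on timeout.
 */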
659static uint8_t
660bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
661{
662 uint32_t access, byte = 0;
663 int i;
664
665 /* Lock. */
666 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
667 for (i = 0; i < 8000; i++) {
668 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
669 break;
670 DELAY(20);
671 }
672 if (i == 8000)
673 return (1);
674
675 /* Enable access. */
676 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
677 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
678
679 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
680 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
681 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
682 DELAY(10);
683 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
684 DELAY(10);
685 break;
686 }
687 }
688
689 if (i == BGE_TIMEOUT * 10) {
690 if_printf(sc->bge_ifp, "nvram read timed out\n");
691 return (1);
692 }
693
694 /* Get result. */
695 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
696
697 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
698
699 /* Disable access. */
700 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
701
702 /* Unlock. */
703 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
704 CSR_READ_4(sc, BGE_NVRAM_SWARB);
705
706 return (0);
707}
708
709/*
710 * Read a sequence of bytes from NVRAM.
711 */
712static int
713bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
714{
715 int err = 0, i;
716 uint8_t byte = 0;
717
718 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
719 return (1);
720
721 for (i = 0; i < cnt; i++) {
722 err = bge_nvram_getbyte(sc, off + i, &byte);
723 if (err)
724 break;
725 *(dest + i) = byte;
726 }
727
728 return (err ? 1 : 0);
729}
730
731/*
732 * Read a byte of data stored in the EEPROM at address 'addr.' The
733 * BCM570x supports both the traditional bitbang interface and an
734 * auto access interface for reading the EEPROM. We use the auto
735 * access method.
736 */
737static uint8_t
738bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
739{
740 int i;
741 uint32_t byte = 0;
742
743 /*
744 * Enable use of auto EEPROM access so we can avoid
745 * having to use the bitbang method.
746 */
747 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
748
749 /* Reset the EEPROM, load the clock period. */
750 CSR_WRITE_4(sc, BGE_EE_ADDR,
751 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
752 DELAY(20);
753
754 /* Issue the read EEPROM command. */
755 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
756
757 /* Wait for completion */
758 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
759 DELAY(10);
760 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
761 break;
762 }
763
764 if (i == BGE_TIMEOUT * 10) {
765 device_printf(sc->bge_dev, "EEPROM read timed out\n");
766 return (1);
767 }
768
769 /* Get result. */
770 byte = CSR_READ_4(sc, BGE_EE_DATA);
771
772 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
773
774 return (0);
775}
776
777/*
778 * Read a sequence of bytes from the EEPROM.
779 */
780static int
781bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
782{
783 int i, error = 0;
784 uint8_t byte = 0;
785
786 for (i = 0; i < cnt; i++) {
787 error = bge_eeprom_getbyte(sc, off + i, &byte);
788 if (error)
789 break;
790 *(dest + i) = byte;
791 }
792
793 return (error ? 1 : 0);
794}
795
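/*
 * MII interface: read and write PHY registers through the MI
 * communication register.  Autopolling is temporarily turned off around
 * each access (see the comments below) and restored afterwards.
 */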
796static int
797bge_miibus_readreg(device_t dev, int phy, int reg)
798{
799 struct bge_softc *sc;
800 uint32_t val;
801 int i;
802
803 sc = device_get_softc(dev);
804
805 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
806 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
807 CSR_WRITE_4(sc, BGE_MI_MODE,
808 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
809 DELAY(80);
810 }
811
812 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
813 BGE_MIPHY(phy) | BGE_MIREG(reg));
814
815 /* Poll for the PHY register access to complete. */
816 for (i = 0; i < BGE_TIMEOUT; i++) {
817 DELAY(10);
818 val = CSR_READ_4(sc, BGE_MI_COMM);
819 if ((val & BGE_MICOMM_BUSY) == 0) {
820 DELAY(5);
821 val = CSR_READ_4(sc, BGE_MI_COMM);
822 break;
823 }
824 }
825
826 if (i == BGE_TIMEOUT) {
827 device_printf(sc->bge_dev,
828 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
829 phy, reg, val);
830 val = 0;
831 }
832
833 /* Restore the autopoll bit if necessary. */
834 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
835 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
836 DELAY(80);
837 }
838
839 if (val & BGE_MICOMM_READFAIL)
840 return (0);
841
842 return (val & 0xFFFF);
843}
844
845static int
846bge_miibus_writereg(device_t dev, int phy, int reg, int val)
847{
848 struct bge_softc *sc;
849 int i;
850
851 sc = device_get_softc(dev);
852
853 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
854 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
855 return (0);
856
857 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
858 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
859 CSR_WRITE_4(sc, BGE_MI_MODE,
860 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
861 DELAY(80);
862 }
863
864 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
865 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
866
867 for (i = 0; i < BGE_TIMEOUT; i++) {
868 DELAY(10);
869 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
870 DELAY(5);
871 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
872 break;
873 }
874 }
875
876 /* Restore the autopoll bit if necessary. */
877 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
878 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
879 DELAY(80);
880 }
881
882 if (i == BGE_TIMEOUT)
883 device_printf(sc->bge_dev,
884 "PHY write timed out (phy %d, reg %d, val %d)\n",
885 phy, reg, val);
886
887 return (0);
888}
889
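/*
 * Handle a link/media change reported by the MII layer: track link state
 * in the softc and program the MAC port mode, duplex and flow-control
 * bits to match the negotiated media.
 */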
890static void
891bge_miibus_statchg(device_t dev)
892{
893 struct bge_softc *sc;
894 struct mii_data *mii;
895 sc = device_get_softc(dev);
896 mii = device_get_softc(sc->bge_miibus);
897
898 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
899 (IFM_ACTIVE | IFM_AVALID)) {
900 switch (IFM_SUBTYPE(mii->mii_media_active)) {
901 case IFM_10_T:
902 case IFM_100_TX:
903 sc->bge_link = 1;
904 break;
905 case IFM_1000_T:
906 case IFM_1000_SX:
907 case IFM_2500_SX:
908 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
909 sc->bge_link = 1;
910 else
911 sc->bge_link = 0;
912 break;
913 default:
914 sc->bge_link = 0;
915 break;
916 }
917 } else
918 sc->bge_link = 0;
919 if (sc->bge_link == 0)
920 return;
921 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
922 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
923 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
924 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
925 else
926 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
927
 928	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
929 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
930 if ((IFM_OPTIONS(mii->mii_media_active) &
931 IFM_ETH_TXPAUSE) != 0)
932 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
933 else
934 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
935 if ((IFM_OPTIONS(mii->mii_media_active) &
936 IFM_ETH_RXPAUSE) != 0)
937 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
938 else
939 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
940 } else {
941 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
942 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
943 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
944 }
945}
946
947/*
 948 * Initialize a standard receive ring descriptor.
949 */
950static int
951bge_newbuf_std(struct bge_softc *sc, int i)
952{
953 struct mbuf *m;
954 struct bge_rx_bd *r;
955 bus_dma_segment_t segs[1];
956 bus_dmamap_t map;
957 int error, nsegs;
958
959 if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
960 (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
961 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
962 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
963 if (m == NULL)
964 return (ENOBUFS);
965 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
966 } else {
967 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
968 if (m == NULL)
969 return (ENOBUFS);
970 m->m_len = m->m_pkthdr.len = MCLBYTES;
971 }
972 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
973 m_adj(m, ETHER_ALIGN);
974
975 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
976 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
977 if (error != 0) {
978 m_freem(m);
979 return (error);
980 }
981 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
982 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
983 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
984 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
985 sc->bge_cdata.bge_rx_std_dmamap[i]);
986 }
987 map = sc->bge_cdata.bge_rx_std_dmamap[i];
988 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
989 sc->bge_cdata.bge_rx_std_sparemap = map;
990 sc->bge_cdata.bge_rx_std_chain[i] = m;
991 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
992 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
993 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
994 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
995 r->bge_flags = BGE_RXBDFLAG_END;
996 r->bge_len = segs[0].ds_len;
997 r->bge_idx = i;
998
999 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1000 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1001
1002 return (0);
1003}
1004
1005/*
1006 * Initialize a jumbo receive ring descriptor. This allocates
1007 * a jumbo buffer from the pool managed internally by the driver.
1008 */
1009static int
1010bge_newbuf_jumbo(struct bge_softc *sc, int i)
1011{
1012 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1013 bus_dmamap_t map;
1014 struct bge_extrx_bd *r;
1015 struct mbuf *m;
1016 int error, nsegs;
1017
1018 MGETHDR(m, M_DONTWAIT, MT_DATA);
1019 if (m == NULL)
1020 return (ENOBUFS);
1021
1022 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1023 if (!(m->m_flags & M_EXT)) {
1024 m_freem(m);
1025 return (ENOBUFS);
1026 }
1027 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1028 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1029 m_adj(m, ETHER_ALIGN);
1030
1031 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1032 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1033 if (error != 0) {
1034 m_freem(m);
1035 return (error);
1036 }
1037
1038 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1039 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1040 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1041 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1042 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1043 }
1044 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1045 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1046 sc->bge_cdata.bge_rx_jumbo_sparemap;
1047 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1048 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1049 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1050 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1051 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1052 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1053
1054 /*
1055 * Fill in the extended RX buffer descriptor.
1056 */
1057 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1058 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1059 r->bge_idx = i;
1060 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1061 switch (nsegs) {
1062 case 4:
1063 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1064 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1065 r->bge_len3 = segs[3].ds_len;
1066 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1067 case 3:
1068 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1069 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1070 r->bge_len2 = segs[2].ds_len;
1071 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1072 case 2:
1073 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1074 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1075 r->bge_len1 = segs[1].ds_len;
1076 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1077 case 1:
1078 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1079 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1080 r->bge_len0 = segs[0].ds_len;
1081 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1082 break;
1083 default:
1084 panic("%s: %d segments\n", __func__, nsegs);
1085 }
1086
1087 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1088 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1089
1090 return (0);
1091}
1092
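/*
 * Fill the standard receive ring with mbufs and hand the initial
 * producer index to the chip.
 */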
1093static int
1094bge_init_rx_ring_std(struct bge_softc *sc)
1095{
1096 int error, i;
1097
1098 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1099 sc->bge_std = 0;
1100 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1101 if ((error = bge_newbuf_std(sc, i)) != 0)
1102 return (error);
1103 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1104 }
1105
1106 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1107 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1108
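	/*
	 * Hand the freshly initialized descriptors to the chip by
	 * advancing the host producer index to the last slot.
	 */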
1109 sc->bge_std = 0;
1110 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1111
1112 return (0);
1113}
1114
1115static void
1116bge_free_rx_ring_std(struct bge_softc *sc)
1117{
1118 int i;
1119
1120 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1121 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1122 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1123 sc->bge_cdata.bge_rx_std_dmamap[i],
1124 BUS_DMASYNC_POSTREAD);
1125 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1126 sc->bge_cdata.bge_rx_std_dmamap[i]);
1127 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1128 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1129 }
1130 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1131 sizeof(struct bge_rx_bd));
1132 }
1133}
1134
1135static int
1136bge_init_rx_ring_jumbo(struct bge_softc *sc)
1137{
1138 struct bge_rcb *rcb;
1139 int error, i;
1140
1141 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1142 sc->bge_jumbo = 0;
1143 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1144 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1145 return (error);
1146 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1147 }
1148
1149 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1150 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1151
1152 sc->bge_jumbo = 0;
1153
1154 /* Enable the jumbo receive producer ring. */
1155 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1156 rcb->bge_maxlen_flags =
1157 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1158 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1159
1160 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1161
1162 return (0);
1163}
1164
1165static void
1166bge_free_rx_ring_jumbo(struct bge_softc *sc)
1167{
1168 int i;
1169
1170 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1171 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1172 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1173 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1174 BUS_DMASYNC_POSTREAD);
1175 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1176 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1177 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1178 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1179 }
1180 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1181 sizeof(struct bge_extrx_bd));
1182 }
1183}
1184
1185static void
1186bge_free_tx_ring(struct bge_softc *sc)
1187{
1188 int i;
1189
1190 if (sc->bge_ldata.bge_tx_ring == NULL)
1191 return;
1192
1193 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1194 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1195 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1196 sc->bge_cdata.bge_tx_dmamap[i],
1197 BUS_DMASYNC_POSTWRITE);
1198 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1199 sc->bge_cdata.bge_tx_dmamap[i]);
1200 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1201 sc->bge_cdata.bge_tx_chain[i] = NULL;
1202 }
1203 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1204 sizeof(struct bge_tx_bd));
1205 }
1206}
1207
1208static int
1209bge_init_tx_ring(struct bge_softc *sc)
1210{
1211 sc->bge_txcnt = 0;
1212 sc->bge_tx_saved_considx = 0;
1213
1214 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1215 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1216 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1217
1218 /* Initialize transmit producer index for host-memory send ring. */
1219 sc->bge_tx_prodidx = 0;
1220 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1221
1222 /* 5700 b2 errata */
1223 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1224 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1225
1226 /* NIC-memory send ring not used; initialize to zero. */
1227 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1228 /* 5700 b2 errata */
1229 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1230 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1231
1232 return (0);
1233}
1234
1235static void
1236bge_setpromisc(struct bge_softc *sc)
1237{
1238 struct ifnet *ifp;
1239
1240 BGE_LOCK_ASSERT(sc);
1241
1242 ifp = sc->bge_ifp;
1243
1244 /* Enable or disable promiscuous mode as needed. */
1245 if (ifp->if_flags & IFF_PROMISC)
1246 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1247 else
1248 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1249}
1250
1251static void
1252bge_setmulti(struct bge_softc *sc)
1253{
1254 struct ifnet *ifp;
1255 struct ifmultiaddr *ifma;
1256 uint32_t hashes[4] = { 0, 0, 0, 0 };
1257 int h, i;
1258
1259 BGE_LOCK_ASSERT(sc);
1260
1261 ifp = sc->bge_ifp;
1262
1263 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1264 for (i = 0; i < 4; i++)
1265 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1266 return;
1267 }
1268
1269 /* First, zot all the existing filters. */
1270 for (i = 0; i < 4; i++)
1271 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1272
1273 /* Now program new ones. */
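	/*
	 * The multicast filter is a 128-bit hash table spread across the
	 * four 32-bit MAR registers.  Bits 6-5 of each address's CRC
	 * select the register and bits 4-0 select the bit within it.
	 */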
1274 if_maddr_rlock(ifp);
1275 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1276 if (ifma->ifma_addr->sa_family != AF_LINK)
1277 continue;
1278 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1279 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1280 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1281 }
1282 if_maddr_runlock(ifp);
1283
1284 for (i = 0; i < 4; i++)
1285 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1286}
1287
1288static void
1289bge_setvlan(struct bge_softc *sc)
1290{
1291 struct ifnet *ifp;
1292
1293 BGE_LOCK_ASSERT(sc);
1294
1295 ifp = sc->bge_ifp;
1296
1297 /* Enable or disable VLAN tag stripping as needed. */
1298 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1299 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1300 else
1301 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1302}
1303
1304static void
1305bge_sig_pre_reset(struct bge_softc *sc, int type)
1306{
1307
1308 /*
1309	 * Some chips don't like this, so only do it if ASF is enabled.
1310 */
1311 if (sc->bge_asf_mode)
1312 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1313
1314 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1315 switch (type) {
1316 case BGE_RESET_START:
1317 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1318 BGE_FW_DRV_STATE_START);
1319 break;
1320 case BGE_RESET_STOP:
1321 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1322 BGE_FW_DRV_STATE_UNLOAD);
1323 break;
1324 }
1325 }
1326}
1327
1328static void
1329bge_sig_post_reset(struct bge_softc *sc, int type)
1330{
1331
1332 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1333 switch (type) {
1334 case BGE_RESET_START:
1335 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1336 BGE_FW_DRV_STATE_START_DONE);
1337 /* START DONE */
1338 break;
1339 case BGE_RESET_STOP:
1340 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1341 BGE_FW_DRV_STATE_UNLOAD_DONE);
1342 break;
1343 }
1344 }
1345}
1346
1347static void
1348bge_sig_legacy(struct bge_softc *sc, int type)
1349{
1350
1351 if (sc->bge_asf_mode) {
1352 switch (type) {
1353 case BGE_RESET_START:
1354 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1355 BGE_FW_DRV_STATE_START);
1356 break;
1357 case BGE_RESET_STOP:
1358 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1359 BGE_FW_DRV_STATE_UNLOAD);
1360 break;
1361 }
1362 }
1363}
1364
1365static void
1366bge_stop_fw(struct bge_softc *sc)
1367{
1368 int i;
1369
1370 if (sc->bge_asf_mode) {
1371 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1372 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1373 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
1374
1375		for (i = 0; i < 100; i++) {
1376 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1377 BGE_RX_CPU_DRV_EVENT))
1378 break;
1379 DELAY(10);
1380 }
1381 }
1382}
1383
443
444static int bge_has_eaddr(struct bge_softc *);
445static uint32_t bge_readmem_ind(struct bge_softc *, int);
446static void bge_writemem_ind(struct bge_softc *, int, int);
447static void bge_writembx(struct bge_softc *, int, int);
448#ifdef notdef
449static uint32_t bge_readreg_ind(struct bge_softc *, int);
450#endif
451static void bge_writemem_direct(struct bge_softc *, int, int);
452static void bge_writereg_ind(struct bge_softc *, int, int);
453
454static int bge_miibus_readreg(device_t, int, int);
455static int bge_miibus_writereg(device_t, int, int, int);
456static void bge_miibus_statchg(device_t);
457#ifdef DEVICE_POLLING
458static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
459#endif
460
461#define BGE_RESET_START 1
462#define BGE_RESET_STOP 2
463static void bge_sig_post_reset(struct bge_softc *, int);
464static void bge_sig_legacy(struct bge_softc *, int);
465static void bge_sig_pre_reset(struct bge_softc *, int);
466static void bge_stop_fw(struct bge_softc *);
467static int bge_reset(struct bge_softc *);
468static void bge_link_upd(struct bge_softc *);
469
470/*
471 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
472 * leak information to untrusted users. It is also known to cause alignment
473 * traps on certain architectures.
474 */
475#ifdef BGE_REGISTER_DEBUG
476static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
477static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
478static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
479#endif
480static void bge_add_sysctls(struct bge_softc *);
481static void bge_add_sysctl_stats_regs(struct bge_softc *,
482 struct sysctl_ctx_list *, struct sysctl_oid_list *);
483static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
484 struct sysctl_oid_list *);
485static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
486
487static device_method_t bge_methods[] = {
488 /* Device interface */
489 DEVMETHOD(device_probe, bge_probe),
490 DEVMETHOD(device_attach, bge_attach),
491 DEVMETHOD(device_detach, bge_detach),
492 DEVMETHOD(device_shutdown, bge_shutdown),
493 DEVMETHOD(device_suspend, bge_suspend),
494 DEVMETHOD(device_resume, bge_resume),
495
496 /* bus interface */
497 DEVMETHOD(bus_print_child, bus_generic_print_child),
498 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
499
500 /* MII interface */
501 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
502 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
503 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
504
505 { 0, 0 }
506};
507
508static driver_t bge_driver = {
509 "bge",
510 bge_methods,
511 sizeof(struct bge_softc)
512};
513
514static devclass_t bge_devclass;
515
516DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
517DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
518
519static int bge_allow_asf = 1;
520
521TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
522
523SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
524SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
525 "Allow ASF mode if available");
526
527#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
528#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
529#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
530#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
531#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
532
533static int
534bge_has_eaddr(struct bge_softc *sc)
535{
536#ifdef __sparc64__
537 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
538 device_t dev;
539 uint32_t subvendor;
540
541 dev = sc->bge_dev;
542
543 /*
544 * The on-board BGEs found in sun4u machines aren't fitted with
545 * an EEPROM which means that we have to obtain the MAC address
546 * via OFW and that some tests will always fail. We distinguish
547 * such BGEs by the subvendor ID, which also has to be obtained
548 * from OFW instead of the PCI configuration space as the latter
549 * indicates Broadcom as the subvendor of the netboot interface.
550 * For early Blade 1500 and 2500 we even have to check the OFW
551 * device path as the subvendor ID always defaults to Broadcom
552 * there.
553 */
554 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
555 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
556 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
557 return (0);
558 memset(buf, 0, sizeof(buf));
559 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
560 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
561 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
562 return (0);
563 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
564 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
565 return (0);
566 }
567#endif
568 return (1);
569}
570
571static uint32_t
572bge_readmem_ind(struct bge_softc *sc, int off)
573{
574 device_t dev;
575 uint32_t val;
576
577 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
578 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
579 return (0);
580
581 dev = sc->bge_dev;
582
583 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
584 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
585 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
586 return (val);
587}
588
589static void
590bge_writemem_ind(struct bge_softc *sc, int off, int val)
591{
592 device_t dev;
593
594 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
595 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
596 return;
597
598 dev = sc->bge_dev;
599
600 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
601 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
602 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
603}
604
605#ifdef notdef
606static uint32_t
607bge_readreg_ind(struct bge_softc *sc, int off)
608{
609 device_t dev;
610
611 dev = sc->bge_dev;
612
613 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
614 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
615}
616#endif
617
618static void
619bge_writereg_ind(struct bge_softc *sc, int off, int val)
620{
621 device_t dev;
622
623 dev = sc->bge_dev;
624
625 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
626 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
627}
628
629static void
630bge_writemem_direct(struct bge_softc *sc, int off, int val)
631{
632 CSR_WRITE_4(sc, off, val);
633}
634
635static void
636bge_writembx(struct bge_softc *sc, int off, int val)
637{
638 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
639 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
640
641 CSR_WRITE_4(sc, off, val);
642}
643
644/*
645 * Map a single buffer address.
646 */
647
648static void
649bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
650{
651 struct bge_dmamap_arg *ctx;
652
653 if (error)
654 return;
655
656 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
657
658 ctx = arg;
659 ctx->bge_busaddr = segs->ds_addr;
660}
661
662static uint8_t
663bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
664{
665 uint32_t access, byte = 0;
666 int i;
667
668 /* Lock. */
669 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
670 for (i = 0; i < 8000; i++) {
671 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
672 break;
673 DELAY(20);
674 }
675 if (i == 8000)
676 return (1);
677
678 /* Enable access. */
679 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
680 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
681
682 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
683 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
684 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
685 DELAY(10);
686 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
687 DELAY(10);
688 break;
689 }
690 }
691
692 if (i == BGE_TIMEOUT * 10) {
693 if_printf(sc->bge_ifp, "nvram read timed out\n");
694 return (1);
695 }
696
697 /* Get result. */
698 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
699
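	/*
	 * The data register returns a 32-bit word; byte-swap it and
	 * shift out the byte at the requested offset within the word.
	 */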
700 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
701
702 /* Disable access. */
703 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
704
705 /* Unlock. */
706 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
707 CSR_READ_4(sc, BGE_NVRAM_SWARB);
708
709 return (0);
710}
711
712/*
713 * Read a sequence of bytes from NVRAM.
714 */
715static int
716bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
717{
718 int err = 0, i;
719 uint8_t byte = 0;
720
721 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
722 return (1);
723
724 for (i = 0; i < cnt; i++) {
725 err = bge_nvram_getbyte(sc, off + i, &byte);
726 if (err)
727 break;
728 *(dest + i) = byte;
729 }
730
731 return (err ? 1 : 0);
732}
733
734/*
735 * Read a byte of data stored in the EEPROM at address 'addr.' The
736 * BCM570x supports both the traditional bitbang interface and an
737 * auto access interface for reading the EEPROM. We use the auto
738 * access method.
739 */
740static uint8_t
741bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
742{
743 int i;
744 uint32_t byte = 0;
745
746 /*
747 * Enable use of auto EEPROM access so we can avoid
748 * having to use the bitbang method.
749 */
750 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
751
752 /* Reset the EEPROM, load the clock period. */
753 CSR_WRITE_4(sc, BGE_EE_ADDR,
754 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
755 DELAY(20);
756
757 /* Issue the read EEPROM command. */
758 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
759
760 /* Wait for completion */
761	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
762 DELAY(10);
763 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
764 break;
765 }
766
767 if (i == BGE_TIMEOUT * 10) {
768 device_printf(sc->bge_dev, "EEPROM read timed out\n");
769 return (1);
770 }
771
772 /* Get result. */
773 byte = CSR_READ_4(sc, BGE_EE_DATA);
774
775 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
776
777 return (0);
778}
779
780/*
781 * Read a sequence of bytes from the EEPROM.
782 */
783static int
784bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
785{
786 int i, error = 0;
787 uint8_t byte = 0;
788
789 for (i = 0; i < cnt; i++) {
790 error = bge_eeprom_getbyte(sc, off + i, &byte);
791 if (error)
792 break;
793 *(dest + i) = byte;
794 }
795
796 return (error ? 1 : 0);
797}
798
799static int
800bge_miibus_readreg(device_t dev, int phy, int reg)
801{
802 struct bge_softc *sc;
803 uint32_t val;
804 int i;
805
806 sc = device_get_softc(dev);
807
808 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
809 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
810 CSR_WRITE_4(sc, BGE_MI_MODE,
811 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
812 DELAY(80);
813 }
814
815 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
816 BGE_MIPHY(phy) | BGE_MIREG(reg));
817
818 /* Poll for the PHY register access to complete. */
819 for (i = 0; i < BGE_TIMEOUT; i++) {
820 DELAY(10);
821 val = CSR_READ_4(sc, BGE_MI_COMM);
822 if ((val & BGE_MICOMM_BUSY) == 0) {
823 DELAY(5);
824 val = CSR_READ_4(sc, BGE_MI_COMM);
825 break;
826 }
827 }
828
829 if (i == BGE_TIMEOUT) {
830 device_printf(sc->bge_dev,
831 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
832 phy, reg, val);
833 val = 0;
834 }
835
836 /* Restore the autopoll bit if necessary. */
837 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
838 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
839 DELAY(80);
840 }
841
842 if (val & BGE_MICOMM_READFAIL)
843 return (0);
844
845 return (val & 0xFFFF);
846}
847
848static int
849bge_miibus_writereg(device_t dev, int phy, int reg, int val)
850{
851 struct bge_softc *sc;
852 int i;
853
854 sc = device_get_softc(dev);
855
856 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
857 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
858 return (0);
859
860 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
861 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
862 CSR_WRITE_4(sc, BGE_MI_MODE,
863 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
864 DELAY(80);
865 }
866
867 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
868 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
869
870 for (i = 0; i < BGE_TIMEOUT; i++) {
871 DELAY(10);
872 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
873 DELAY(5);
874 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
875 break;
876 }
877 }
878
879 /* Restore the autopoll bit if necessary. */
880 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
881 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
882 DELAY(80);
883 }
884
885 if (i == BGE_TIMEOUT)
886 device_printf(sc->bge_dev,
887 "PHY write timed out (phy %d, reg %d, val %d)\n",
888 phy, reg, val);
889
890 return (0);
891}
892
893static void
894bge_miibus_statchg(device_t dev)
895{
896 struct bge_softc *sc;
897 struct mii_data *mii;
898 sc = device_get_softc(dev);
899 mii = device_get_softc(sc->bge_miibus);
900
901 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
902 (IFM_ACTIVE | IFM_AVALID)) {
903 switch (IFM_SUBTYPE(mii->mii_media_active)) {
904 case IFM_10_T:
905 case IFM_100_TX:
906 sc->bge_link = 1;
907 break;
908 case IFM_1000_T:
909 case IFM_1000_SX:
910 case IFM_2500_SX:
911 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
912 sc->bge_link = 1;
913 else
914 sc->bge_link = 0;
915 break;
916 default:
917 sc->bge_link = 0;
918 break;
919 }
920 } else
921 sc->bge_link = 0;
922 if (sc->bge_link == 0)
923 return;
924 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
925 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
926 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
927 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
928 else
929 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
930
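	/*
	 * Program MAC duplex and TX/RX flow control (pause) settings to
	 * match what the PHY negotiated.
	 */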
931	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
932 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
933 if ((IFM_OPTIONS(mii->mii_media_active) &
934 IFM_ETH_TXPAUSE) != 0)
935 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
936 else
937 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
938 if ((IFM_OPTIONS(mii->mii_media_active) &
939 IFM_ETH_RXPAUSE) != 0)
940 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
941 else
942 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
943 } else {
944 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
945 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
946 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
947 }
948}
949
950/*
951 * Initialize a standard receive ring descriptor.
952 */
953static int
954bge_newbuf_std(struct bge_softc *sc, int i)
955{
956 struct mbuf *m;
957 struct bge_rx_bd *r;
958 bus_dma_segment_t segs[1];
959 bus_dmamap_t map;
960 int error, nsegs;
961
962 if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
963 (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
964 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
965 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
966 if (m == NULL)
967 return (ENOBUFS);
968 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
969 } else {
970 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
971 if (m == NULL)
972 return (ENOBUFS);
973 m->m_len = m->m_pkthdr.len = MCLBYTES;
974 }
975 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
976 m_adj(m, ETHER_ALIGN);
977
978 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
979 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
980 if (error != 0) {
981 m_freem(m);
982 return (error);
983 }
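	/*
	 * The new mbuf was loaded into the spare DMA map above; unload
	 * the old buffer (if any), then swap maps so the old map becomes
	 * the new spare.
	 */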
984 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
985 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
986 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
987 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
988 sc->bge_cdata.bge_rx_std_dmamap[i]);
989 }
990 map = sc->bge_cdata.bge_rx_std_dmamap[i];
991 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
992 sc->bge_cdata.bge_rx_std_sparemap = map;
993 sc->bge_cdata.bge_rx_std_chain[i] = m;
994 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
995 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
996 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
997 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
998 r->bge_flags = BGE_RXBDFLAG_END;
999 r->bge_len = segs[0].ds_len;
1000 r->bge_idx = i;
1001
1002 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1003 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1004
1005 return (0);
1006}
1007
1008/*
1009 * Initialize a jumbo receive ring descriptor. This allocates
1010 * a 9k (MJUM9BYTES) mbuf cluster and maps it for DMA.
1011 */
1012static int
1013bge_newbuf_jumbo(struct bge_softc *sc, int i)
1014{
1015 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1016 bus_dmamap_t map;
1017 struct bge_extrx_bd *r;
1018 struct mbuf *m;
1019 int error, nsegs;
1020
1021 MGETHDR(m, M_DONTWAIT, MT_DATA);
1022 if (m == NULL)
1023 return (ENOBUFS);
1024
1025 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1026 if (!(m->m_flags & M_EXT)) {
1027 m_freem(m);
1028 return (ENOBUFS);
1029 }
1030 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1031 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1032 m_adj(m, ETHER_ALIGN);
1033
1034 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1035 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1036 if (error != 0) {
1037 m_freem(m);
1038 return (error);
1039 }
1040
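	/*
	 * The new mbuf was loaded into the spare DMA map above; unload
	 * the old buffer (if any), then swap maps so the old map becomes
	 * the new spare.
	 */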
1041 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1042 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1043 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1044 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1045 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1046 }
1047 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1048 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1049 sc->bge_cdata.bge_rx_jumbo_sparemap;
1050 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1051 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1052 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1053 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1054 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1055 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1056
1057 /*
1058 * Fill in the extended RX buffer descriptor.
1059 */
1060 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1061 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1062 r->bge_idx = i;
1063 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1064 switch (nsegs) {
1065 case 4:
1066 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1067 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1068 r->bge_len3 = segs[3].ds_len;
1069 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1070 case 3:
1071 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1072 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1073 r->bge_len2 = segs[2].ds_len;
1074 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1075 case 2:
1076 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1077 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1078 r->bge_len1 = segs[1].ds_len;
1079 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1080 case 1:
1081 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1082 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1083 r->bge_len0 = segs[0].ds_len;
1084 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1085 break;
1086 default:
1087 panic("%s: %d segments\n", __func__, nsegs);
1088 }
1089
1090 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1091 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1092
1093 return (0);
1094}
1095
1096static int
1097bge_init_rx_ring_std(struct bge_softc *sc)
1098{
1099 int error, i;
1100
1101 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1102 sc->bge_std = 0;
1103 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1104 if ((error = bge_newbuf_std(sc, i)) != 0)
1105 return (error);
1106 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1107 }
1108
1109 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1110 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1111
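	/*
	 * Hand the freshly initialized descriptors to the chip by
	 * advancing the host producer index to the last slot.
	 */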
1112 sc->bge_std = 0;
1113 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1114
1115 return (0);
1116}
1117
1118static void
1119bge_free_rx_ring_std(struct bge_softc *sc)
1120{
1121 int i;
1122
1123 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1124 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1125 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1126 sc->bge_cdata.bge_rx_std_dmamap[i],
1127 BUS_DMASYNC_POSTREAD);
1128 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1129 sc->bge_cdata.bge_rx_std_dmamap[i]);
1130 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1131 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1132 }
1133 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1134 sizeof(struct bge_rx_bd));
1135 }
1136}
1137
1138static int
1139bge_init_rx_ring_jumbo(struct bge_softc *sc)
1140{
1141 struct bge_rcb *rcb;
1142 int error, i;
1143
1144 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1145 sc->bge_jumbo = 0;
1146 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1147 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1148 return (error);
1149 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1150 }
1151
1152 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1153 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1154
1155 sc->bge_jumbo = 0;
1156
1157 /* Enable the jumbo receive producer ring. */
1158 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1159 rcb->bge_maxlen_flags =
1160 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1161 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1162
1163 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1164
1165 return (0);
1166}
1167
1168static void
1169bge_free_rx_ring_jumbo(struct bge_softc *sc)
1170{
1171 int i;
1172
1173 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1174 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1175 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1176 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1177 BUS_DMASYNC_POSTREAD);
1178 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1179 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1180 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1181 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1182 }
1183 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1184 sizeof(struct bge_extrx_bd));
1185 }
1186}
1187
1188static void
1189bge_free_tx_ring(struct bge_softc *sc)
1190{
1191 int i;
1192
1193 if (sc->bge_ldata.bge_tx_ring == NULL)
1194 return;
1195
1196 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1197 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1198 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1199 sc->bge_cdata.bge_tx_dmamap[i],
1200 BUS_DMASYNC_POSTWRITE);
1201 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1202 sc->bge_cdata.bge_tx_dmamap[i]);
1203 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1204 sc->bge_cdata.bge_tx_chain[i] = NULL;
1205 }
1206 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1207 sizeof(struct bge_tx_bd));
1208 }
1209}
1210
1211static int
1212bge_init_tx_ring(struct bge_softc *sc)
1213{
1214 sc->bge_txcnt = 0;
1215 sc->bge_tx_saved_considx = 0;
1216
1217 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1218 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1219 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1220
1221 /* Initialize transmit producer index for host-memory send ring. */
1222 sc->bge_tx_prodidx = 0;
1223 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1224
1225 /* 5700 b2 errata */
1226 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1227 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1228
1229 /* NIC-memory send ring not used; initialize to zero. */
1230 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1231 /* 5700 b2 errata */
1232 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1233 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1234
1235 return (0);
1236}
1237
1238static void
1239bge_setpromisc(struct bge_softc *sc)
1240{
1241 struct ifnet *ifp;
1242
1243 BGE_LOCK_ASSERT(sc);
1244
1245 ifp = sc->bge_ifp;
1246
1247 /* Enable or disable promiscuous mode as needed. */
1248 if (ifp->if_flags & IFF_PROMISC)
1249 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1250 else
1251 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1252}
1253
1254static void
1255bge_setmulti(struct bge_softc *sc)
1256{
1257 struct ifnet *ifp;
1258 struct ifmultiaddr *ifma;
1259 uint32_t hashes[4] = { 0, 0, 0, 0 };
1260 int h, i;
1261
1262 BGE_LOCK_ASSERT(sc);
1263
1264 ifp = sc->bge_ifp;
1265
1266 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1267 for (i = 0; i < 4; i++)
1268 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1269 return;
1270 }
1271
1272 /* First, zot all the existing filters. */
1273 for (i = 0; i < 4; i++)
1274 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1275
1276 /* Now program new ones. */
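	/*
	 * The multicast filter is a 128-bit hash table spread across the
	 * four 32-bit MAR registers.  Bits 6-5 of each address's CRC
	 * select the register and bits 4-0 select the bit within it.
	 */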
1277 if_maddr_rlock(ifp);
1278 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1279 if (ifma->ifma_addr->sa_family != AF_LINK)
1280 continue;
1281 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1282 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1283 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1284 }
1285 if_maddr_runlock(ifp);
1286
1287 for (i = 0; i < 4; i++)
1288 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1289}
1290
1291static void
1292bge_setvlan(struct bge_softc *sc)
1293{
1294 struct ifnet *ifp;
1295
1296 BGE_LOCK_ASSERT(sc);
1297
1298 ifp = sc->bge_ifp;
1299
1300 /* Enable or disable VLAN tag stripping as needed. */
1301 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1302 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1303 else
1304 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1305}
1306
1307static void
1308bge_sig_pre_reset(struct bge_softc *sc, int type)
1309{
1310
1311 /*
1312	 * Some chips don't like this, so only do it if ASF is enabled.
1313 */
1314 if (sc->bge_asf_mode)
1315 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1316
1317 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1318 switch (type) {
1319 case BGE_RESET_START:
1320 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1321 BGE_FW_DRV_STATE_START);
1322 break;
1323 case BGE_RESET_STOP:
1324 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1325 BGE_FW_DRV_STATE_UNLOAD);
1326 break;
1327 }
1328 }
1329}
1330
1331static void
1332bge_sig_post_reset(struct bge_softc *sc, int type)
1333{
1334
1335 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1336 switch (type) {
1337 case BGE_RESET_START:
1338 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1339 BGE_FW_DRV_STATE_START_DONE);
1340 /* START DONE */
1341 break;
1342 case BGE_RESET_STOP:
1343 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1344 BGE_FW_DRV_STATE_UNLOAD_DONE);
1345 break;
1346 }
1347 }
1348}
1349
1350static void
1351bge_sig_legacy(struct bge_softc *sc, int type)
1352{
1353
1354 if (sc->bge_asf_mode) {
1355 switch (type) {
1356 case BGE_RESET_START:
1357 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1358 BGE_FW_DRV_STATE_START);
1359 break;
1360 case BGE_RESET_STOP:
1361 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1362 BGE_FW_DRV_STATE_UNLOAD);
1363 break;
1364 }
1365 }
1366}
1367
1368static void
1369bge_stop_fw(struct bge_softc *sc)
1370{
1371 int i;
1372
1373 if (sc->bge_asf_mode) {
1374 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1375 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1376 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
1377
1378		for (i = 0; i < 100; i++) {
1379 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1380 BGE_RX_CPU_DRV_EVENT))
1381 break;
1382 DELAY(10);
1383 }
1384 }
1385}
1386
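/*
 * Compute the byte/word swapping options for the mode control register.
 * Frame data swapping is always enabled; non-frame data is additionally
 * byte swapped on big-endian hosts, and the BCM5720 also needs the
 * buffer-to-host RX data swap and enable bits.
 */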
1387static uint32_t
1388bge_dma_swap_options(struct bge_softc *sc)
1389{
1390 uint32_t dma_options;
1391
1392 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
1393 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
1394#if BYTE_ORDER == BIG_ENDIAN
1395 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
1396#endif
1397 if ((sc)->bge_asicrev == BGE_ASICREV_BCM5720)
1398 dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1399 BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
1400 BGE_MODECTL_HTX2B_ENABLE;
1401
1402 return (dma_options);
1403}
1404
1384/*
1385 * Do endian, PCI and DMA initialization.
1386 */
1387static int
1388bge_chipinit(struct bge_softc *sc)
1389{
1405/*
1406 * Do endian, PCI and DMA initialization.
1407 */
1408static int
1409bge_chipinit(struct bge_softc *sc)
1410{
1390 uint32_t dma_rw_ctl, misc_ctl;
1411 uint32_t dma_rw_ctl, misc_ctl, mode_ctl;
1391 uint16_t val;
1392 int i;
1393
1394 /* Set endianness before we access any non-PCI registers. */
1395 misc_ctl = BGE_INIT;
1396 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1397 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1398 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1399
1400 /* Clear the MAC control register */
1401 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1402
1403 /*
1404 * Clear the MAC statistics block in the NIC's
1405 * internal memory.
1406 */
1407 for (i = BGE_STATS_BLOCK;
1408 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1409 BGE_MEMWIN_WRITE(sc, i, 0);
1410
1411 for (i = BGE_STATUS_BLOCK;
1412 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1413 BGE_MEMWIN_WRITE(sc, i, 0);
1414
1415 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1416 /*
1417 * Fix data corruption caused by non-qword write with WB.
1418 * Fix master abort in PCI mode.
1419 * Fix PCI latency timer.
1420 */
1421 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1422 val |= (1 << 10) | (1 << 12) | (1 << 13);
1423 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1424 }
1425
1426 /*
1427 * Set up the PCI DMA control register.
1428 */
1429 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1430 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1431 if (sc->bge_flags & BGE_FLAG_PCIE) {
1432 /* Read watermark not used, 128 bytes for write. */
1433 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1434 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1435 if (BGE_IS_5714_FAMILY(sc)) {
1436 /* 256 bytes for read and write. */
1437 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1438 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1439 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1440 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1441 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1442 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1443 /*
1444 * In the BCM5703, the DMA read watermark should
1445 * be set to less than or equal to the maximum
1446 * memory read byte count of the PCI-X command
1447 * register.
1448 */
1449 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1450 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1451 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1452 /* 1536 bytes for read, 384 bytes for write. */
1453 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1454 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1455 } else {
1456 /* 384 bytes for read and write. */
1457 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1458 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1459 0x0F;
1460 }
1461 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1462 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1463 uint32_t tmp;
1464
1465 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1466 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1467 if (tmp == 6 || tmp == 7)
1468 dma_rw_ctl |=
1469 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1470
1471 /* Set PCI-X DMA write workaround. */
1472 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1473 }
1474 } else {
1475 /* Conventional PCI bus: 256 bytes for read and write. */
1476 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1477 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1478
1479 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1480 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1481 dma_rw_ctl |= 0x0F;
1482 }
1483 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1484 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1485 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1486 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1487 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1488 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1489 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1490 if (BGE_IS_5717_PLUS(sc)) {
1491 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1492 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1493 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1494 /*
1495 * Enable HW workaround for controllers that misinterpret
1496 * a status tag update and leave interrupts permanently
1497 * disabled.
1498 */
1499 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1500 sc->bge_asicrev != BGE_ASICREV_BCM57765)
1501 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1502 }
1503 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1504
1505 /*
1506 * Set up general mode register.
1507 */
1412 uint16_t val;
1413 int i;
1414
1415 /* Set endianness before we access any non-PCI registers. */
1416 misc_ctl = BGE_INIT;
1417 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1418 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1419 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1420
1421 /* Clear the MAC control register */
1422 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1423
1424 /*
1425 * Clear the MAC statistics block in the NIC's
1426 * internal memory.
1427 */
1428 for (i = BGE_STATS_BLOCK;
1429 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1430 BGE_MEMWIN_WRITE(sc, i, 0);
1431
1432 for (i = BGE_STATUS_BLOCK;
1433 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1434 BGE_MEMWIN_WRITE(sc, i, 0);
1435
1436 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1437 /*
1438 * Fix data corruption caused by non-qword write with WB.
1439 * Fix master abort in PCI mode.
1440 * Fix PCI latency timer.
1441 */
1442 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1443 val |= (1 << 10) | (1 << 12) | (1 << 13);
1444 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1445 }
1446
1447 /*
1448 * Set up the PCI DMA control register.
1449 */
1450 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1451 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1452 if (sc->bge_flags & BGE_FLAG_PCIE) {
1453 /* Read watermark not used, 128 bytes for write. */
1454 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1455 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1456 if (BGE_IS_5714_FAMILY(sc)) {
1457 /* 256 bytes for read and write. */
1458 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1459 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1460 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1461 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1462 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1463 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1464 /*
1465 * In the BCM5703, the DMA read watermark should
1466 * be set to less than or equal to the maximum
1467 * memory read byte count of the PCI-X command
1468 * register.
1469 */
1470 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1471 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1472 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1473 /* 1536 bytes for read, 384 bytes for write. */
1474 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1475 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1476 } else {
1477 /* 384 bytes for read and write. */
1478 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1479 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1480 0x0F;
1481 }
1482 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1483 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1484 uint32_t tmp;
1485
1486 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1487 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1488 if (tmp == 6 || tmp == 7)
1489 dma_rw_ctl |=
1490 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1491
1492 /* Set PCI-X DMA write workaround. */
1493 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1494 }
1495 } else {
1496 /* Conventional PCI bus: 256 bytes for read and write. */
1497 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1498 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1499
1500 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1501 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1502 dma_rw_ctl |= 0x0F;
1503 }
1504 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1505 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1506 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1507 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1508 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1509 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1510 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1511 if (BGE_IS_5717_PLUS(sc)) {
1512 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1513 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1514 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1515 /*
1516 * Enable HW workaround for controllers that misinterpret
1517 * a status tag update and leave interrupts permanently
1518 * disabled.
1519 */
1520 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1521 sc->bge_asicrev != BGE_ASICREV_BCM57765)
1522 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1523 }
1524 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1525
1526 /*
1527 * Set up general mode register.
1528 */
1508 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1509 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1510 BGE_MODECTL_TX_NO_PHDR_CSUM);
1529 mode_ctl = bge_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
1530 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
1511
1512 /*
1513	 * BCM5701 B5 has a bug causing data corruption when using
1514 * 64-bit DMA reads, which can be terminated early and then
1515 * completed later as 32-bit accesses, in combination with
1516 * certain bridges.
1517 */
1518 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1519 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1531
1532 /*
1533	 * BCM5701 B5 has a bug causing data corruption when using
1534 * 64-bit DMA reads, which can be terminated early and then
1535 * completed later as 32-bit accesses, in combination with
1536 * certain bridges.
1537 */
1538 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1539 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1520 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1540 mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1521
1522 /*
1523 * Tell the firmware the driver is running
1524 */
1525 if (sc->bge_asf_mode & ASF_STACKUP)
1541
1542 /*
1543 * Tell the firmware the driver is running
1544 */
1545 if (sc->bge_asf_mode & ASF_STACKUP)
1526 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1546 mode_ctl |= BGE_MODECTL_STACKUP;
1527
1547
1548 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1549
1528 /*
1529 * Disable memory write invalidate. Apparently it is not supported
1530 * properly by these devices. Also ensure that INTx isn't disabled,
1531 * as these chips need it even when using MSI.
1532 */
1533 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1534 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1535
1537	/* Set the timer prescaler (always 66 MHz). */
1537 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1538
1539 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1540 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1541 DELAY(40); /* XXX */
1542
1543 /* Put PHY into ready state */
1544 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1545 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1546 DELAY(40);
1547 }
1548
1549 return (0);
1550}
1551
1552static int
1553bge_blockinit(struct bge_softc *sc)
1554{
1555 struct bge_rcb *rcb;
1556 bus_size_t vrcb;
1557 bge_hostaddr taddr;
1558 uint32_t dmactl, val;
1559 int i, limit;
1560
1561 /*
1562 * Initialize the memory window pointer register so that
1563 * we can access the first 32K of internal NIC RAM. This will
1564 * allow us to set up the TX send ring RCBs and the RX return
1565 * ring RCBs, plus other things which live in NIC memory.
1566 */
1567 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1568
1569 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1570
1571 if (!(BGE_IS_5705_PLUS(sc))) {
1572 /* Configure mbuf memory pool */
1573 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1574 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1575 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1576 else
1577 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1578
1579 /* Configure DMA resource pool */
1580 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1581 BGE_DMA_DESCRIPTORS);
1582 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1583 }
1584
1585 /* Configure mbuf pool watermarks */
1550 /*
1551 * Disable memory write invalidate. Apparently it is not supported
1552 * properly by these devices. Also ensure that INTx isn't disabled,
1553 * as these chips need it even when using MSI.
1554 */
1555 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1556 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1557
1559	/* Set the timer prescaler (always 66 MHz). */
1559 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1560
1561 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1562 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1563 DELAY(40); /* XXX */
1564
1565 /* Put PHY into ready state */
1566 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1567 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1568 DELAY(40);
1569 }
1570
1571 return (0);
1572}
1573
1574static int
1575bge_blockinit(struct bge_softc *sc)
1576{
1577 struct bge_rcb *rcb;
1578 bus_size_t vrcb;
1579 bge_hostaddr taddr;
1580 uint32_t dmactl, val;
1581 int i, limit;
1582
1583 /*
1584 * Initialize the memory window pointer register so that
1585 * we can access the first 32K of internal NIC RAM. This will
1586 * allow us to set up the TX send ring RCBs and the RX return
1587 * ring RCBs, plus other things which live in NIC memory.
1588 */
1589 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1590
1591 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1592
1593 if (!(BGE_IS_5705_PLUS(sc))) {
1594 /* Configure mbuf memory pool */
1595 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1596 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1597 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1598 else
1599 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1600
1601 /* Configure DMA resource pool */
1602 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1603 BGE_DMA_DESCRIPTORS);
1604 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1605 }
1606
1607 /* Configure mbuf pool watermarks */
1586 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1587 sc->bge_asicrev == BGE_ASICREV_BCM57765) {
1608 if (BGE_IS_5717_PLUS(sc)) {
1588 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1589 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1590 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1591 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1592 } else {
1593 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1594 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1595 }
1596 } else if (!BGE_IS_5705_PLUS(sc)) {
1597 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1598 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1599 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1600 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1601 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1602 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1603 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1604 } else {
1605 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1606 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1607 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1608 }
1609
1610 /* Configure DMA resource watermarks */
1611 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1612 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1613
1614 /* Enable buffer manager */
1615 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1616 /*
1617	 * Change the arbitration algorithm for TXMBUF read requests to
1618	 * round-robin instead of priority-based for the BCM5719. When
1619 * TXFIFO is almost empty, RDMA will hold its request until
1620 * TXFIFO is not almost empty.
1621 */
1622 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
1623 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1624 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1625
1626 /* Poll for buffer manager start indication */
1627 for (i = 0; i < BGE_TIMEOUT; i++) {
1628 DELAY(10);
1629 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1630 break;
1631 }
1632
1633 if (i == BGE_TIMEOUT) {
1634 device_printf(sc->bge_dev, "buffer manager failed to start\n");
1635 return (ENXIO);
1636 }
1637
1638 /* Enable flow-through queues */
1639 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1640 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1641
1642 /* Wait until queue initialization is complete */
1643 for (i = 0; i < BGE_TIMEOUT; i++) {
1644 DELAY(10);
1645 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1646 break;
1647 }
1648
1649 if (i == BGE_TIMEOUT) {
1650 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1651 return (ENXIO);
1652 }
1653
1654 /*
1655 * Summary of rings supported by the controller:
1656 *
1657 * Standard Receive Producer Ring
1658 * - This ring is used to feed receive buffers for "standard"
1659 * sized frames (typically 1536 bytes) to the controller.
1660 *
1661 * Jumbo Receive Producer Ring
1662 * - This ring is used to feed receive buffers for jumbo sized
1663 * frames (i.e. anything bigger than the "standard" frames)
1664 * to the controller.
1665 *
1666 * Mini Receive Producer Ring
1667 * - This ring is used to feed receive buffers for "mini"
1668 * sized frames to the controller.
1669 * - This feature required external memory for the controller
1670 * but was never used in a production system. Should always
1671 * be disabled.
1672 *
1673 * Receive Return Ring
1674 * - After the controller has placed an incoming frame into a
1675	 * receive buffer, that buffer is moved into a receive return
1676	 * ring. The driver is then responsible for passing the
1677 * buffer up to the stack. Many versions of the controller
1678 * support multiple RR rings.
1679 *
1680 * Send Ring
1681 * - This ring is used for outgoing frames. Many versions of
1682 * the controller support multiple send rings.
1683 */
1684
1685 /* Initialize the standard receive producer ring control block. */
1686 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1687 rcb->bge_hostaddr.bge_addr_lo =
1688 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1689 rcb->bge_hostaddr.bge_addr_hi =
1690 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1691 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1692 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1693 if (BGE_IS_5717_PLUS(sc)) {
1694 /*
1695 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1696 * Bits 15-2 : Maximum RX frame size
1697		 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1698 * Bit 0 : Reserved
1699 */
1700 rcb->bge_maxlen_flags =
1701 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1702 } else if (BGE_IS_5705_PLUS(sc)) {
1703 /*
1704 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1705 * Bits 15-2 : Reserved (should be 0)
1706 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1707 * Bit 0 : Reserved
1708 */
1709 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1710 } else {
1711 /*
1712 * Ring size is always XXX entries
1713 * Bits 31-16: Maximum RX frame size
1714 * Bits 15-2 : Reserved (should be 0)
1715 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1716 * Bit 0 : Reserved
1717 */
1718 rcb->bge_maxlen_flags =
1719 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1720 }
1721 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1609 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1610 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1611 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1612 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1613 } else {
1614 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1615 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1616 }
1617 } else if (!BGE_IS_5705_PLUS(sc)) {
1618 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1619 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1620 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1621 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1622 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1623 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1624 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1625 } else {
1626 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1627 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1628 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1629 }
1630
1631 /* Configure DMA resource watermarks */
1632 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1633 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1634
1635 /* Enable buffer manager */
1636 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1637 /*
1638	 * Change the arbitration algorithm for TXMBUF read requests to
1639	 * round-robin instead of priority-based for the BCM5719. When
1640 * TXFIFO is almost empty, RDMA will hold its request until
1641 * TXFIFO is not almost empty.
1642 */
1643 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
1644 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1645 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1646
1647 /* Poll for buffer manager start indication */
1648 for (i = 0; i < BGE_TIMEOUT; i++) {
1649 DELAY(10);
1650 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1651 break;
1652 }
1653
1654 if (i == BGE_TIMEOUT) {
1655 device_printf(sc->bge_dev, "buffer manager failed to start\n");
1656 return (ENXIO);
1657 }
1658
1659 /* Enable flow-through queues */
1660 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1661 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1662
1663 /* Wait until queue initialization is complete */
1664 for (i = 0; i < BGE_TIMEOUT; i++) {
1665 DELAY(10);
1666 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1667 break;
1668 }
1669
1670 if (i == BGE_TIMEOUT) {
1671 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1672 return (ENXIO);
1673 }
1674
1675 /*
1676 * Summary of rings supported by the controller:
1677 *
1678 * Standard Receive Producer Ring
1679 * - This ring is used to feed receive buffers for "standard"
1680 * sized frames (typically 1536 bytes) to the controller.
1681 *
1682 * Jumbo Receive Producer Ring
1683 * - This ring is used to feed receive buffers for jumbo sized
1684 * frames (i.e. anything bigger than the "standard" frames)
1685 * to the controller.
1686 *
1687 * Mini Receive Producer Ring
1688 * - This ring is used to feed receive buffers for "mini"
1689 * sized frames to the controller.
1690 * - This feature required external memory for the controller
1691 * but was never used in a production system. Should always
1692 * be disabled.
1693 *
1694 * Receive Return Ring
1695 * - After the controller has placed an incoming frame into a
1696 * receive buffer that buffer is moved into a receive return
1697 * ring. The driver is then responsible for passing the
1698 * buffer up to the stack. Many versions of the controller
1699 * support multiple RR rings.
1700 *
1701 * Send Ring
1702 * - This ring is used for outgoing frames. Many versions of
1703 * the controller support multiple send rings.
1704 */
1705
1706 /* Initialize the standard receive producer ring control block. */
1707 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1708 rcb->bge_hostaddr.bge_addr_lo =
1709 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1710 rcb->bge_hostaddr.bge_addr_hi =
1711 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1712 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1713 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1714 if (BGE_IS_5717_PLUS(sc)) {
1715 /*
1716 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1717 * Bits 15-2 : Maximum RX frame size
1718 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1719 * Bit 0 : Reserved
1720 */
1721 rcb->bge_maxlen_flags =
1722 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1723 } else if (BGE_IS_5705_PLUS(sc)) {
1724 /*
1725 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1726 * Bits 15-2 : Reserved (should be 0)
1727 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1728 * Bit 0 : Reserved
1729 */
1730 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1731 } else {
1732 /*
1733 * Ring size is always XXX entries
1734 * Bits 31-16: Maximum RX frame size
1735 * Bits 15-2 : Reserved (should be 0)
1736 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1737 * Bit 0 : Reserved
1738 */
1739 rcb->bge_maxlen_flags =
1740 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1741 }
1742 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1722 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1743 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1744 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1723 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1724 else
1725 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1726 /* Write the standard receive producer ring control block. */
1727 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1728 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1729 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1730 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1731
1732 /* Reset the standard receive producer ring producer index. */
1733 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1734
1735 /*
1736 * Initialize the jumbo RX producer ring control
1737 * block. We set the 'ring disabled' bit in the
1738 * flags field until we're actually ready to start
1739 * using this ring (i.e. once we set the MTU
1740 * high enough to require it).
1741 */
1742 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1743 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1744 /* Get the jumbo receive producer ring RCB parameters. */
1745 rcb->bge_hostaddr.bge_addr_lo =
1746 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1747 rcb->bge_hostaddr.bge_addr_hi =
1748 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1749 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1750 sc->bge_cdata.bge_rx_jumbo_ring_map,
1751 BUS_DMASYNC_PREREAD);
1752 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1753 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1754 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1745 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1746 else
1747 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1748 /* Write the standard receive producer ring control block. */
1749 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1750 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1751 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1752 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1753
1754 /* Reset the standard receive producer ring producer index. */
1755 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1756
1757 /*
1758 * Initialize the jumbo RX producer ring control
1759 * block. We set the 'ring disabled' bit in the
1760 * flags field until we're actually ready to start
1761 * using this ring (i.e. once we set the MTU
1762 * high enough to require it).
1763 */
1764 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1765 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1766 /* Get the jumbo receive producer ring RCB parameters. */
1767 rcb->bge_hostaddr.bge_addr_lo =
1768 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1769 rcb->bge_hostaddr.bge_addr_hi =
1770 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1771 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1772 sc->bge_cdata.bge_rx_jumbo_ring_map,
1773 BUS_DMASYNC_PREREAD);
1774 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1775 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1776 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1755 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1777 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1778 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1756 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1757 else
1758 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1759 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1760 rcb->bge_hostaddr.bge_addr_hi);
1761 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1762 rcb->bge_hostaddr.bge_addr_lo);
1763 /* Program the jumbo receive producer ring RCB parameters. */
1764 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1765 rcb->bge_maxlen_flags);
1766 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1767 /* Reset the jumbo receive producer ring producer index. */
1768 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1769 }
1770
1771 /* Disable the mini receive producer ring RCB. */
1772 if (BGE_IS_5700_FAMILY(sc)) {
1773 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1774 rcb->bge_maxlen_flags =
1775 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1776 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1777 rcb->bge_maxlen_flags);
1778 /* Reset the mini receive producer ring producer index. */
1779 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1780 }
1781
1782 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1783 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1784 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1785 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1786 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1787 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1788 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1789 }
1790 /*
1791 * The BD ring replenish thresholds control how often the
1792 * hardware fetches new BD's from the producer rings in host
1793 * memory. Setting the value too low on a busy system can
1794 * starve the hardware and reduce the throughput.
1795 *
1796 * Set the BD ring replenish thresholds. The recommended
1797 * values are 1/8th the number of descriptors allocated to
1798 * each ring.
1799 * XXX The 5754 requires a lower threshold, so it might be a
1800 * requirement of all 575x family chips. The Linux driver sets
1801 * the lower threshold for all 5705 family chips as well, but there
1802 * are reports that it might not need to be so strict.
1803 *
1804 * XXX Linux does some extra fiddling here for the 5906 parts as
1805 * well.
1806 */
1807 if (BGE_IS_5705_PLUS(sc))
1808 val = 8;
1809 else
1810 val = BGE_STD_RX_RING_CNT / 8;
1811 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
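	/*
	 * Worked example (assuming the usual BGE_STD_RX_RING_CNT of 512):
	 * pre-5705 chips get a replenish threshold of 512 / 8 = 64 BDs
	 * here, while 5705+ chips use the fixed value of 8 set above.
	 */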
1812 if (BGE_IS_JUMBO_CAPABLE(sc))
1813 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1814 BGE_JUMBO_RX_RING_CNT/8);
1815 if (BGE_IS_5717_PLUS(sc)) {
1816 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1817 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1818 }
1819
1820 /*
1821 * Disable all send rings by setting the 'ring disabled' bit
1822 * in the flags field of all the TX send ring control blocks,
1823 * located in NIC memory.
1824 */
1825 if (!BGE_IS_5705_PLUS(sc))
1826 /* 5700 to 5704 had 16 send rings. */
1827 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1828 else
1829 limit = 1;
1830 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1831 for (i = 0; i < limit; i++) {
1832 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1833 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1834 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1835 vrcb += sizeof(struct bge_rcb);
1836 }
1837
1838 /* Configure send ring RCB 0 (we use only the first ring) */
1839 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1840 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1841 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1842 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1843 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1779 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1780 else
1781 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1782 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1783 rcb->bge_hostaddr.bge_addr_hi);
1784 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1785 rcb->bge_hostaddr.bge_addr_lo);
1786 /* Program the jumbo receive producer ring RCB parameters. */
1787 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1788 rcb->bge_maxlen_flags);
1789 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1790 /* Reset the jumbo receive producer ring producer index. */
1791 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1792 }
1793
1794 /* Disable the mini receive producer ring RCB. */
1795 if (BGE_IS_5700_FAMILY(sc)) {
1796 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1797 rcb->bge_maxlen_flags =
1798 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1799 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1800 rcb->bge_maxlen_flags);
1801 /* Reset the mini receive producer ring producer index. */
1802 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1803 }
1804
1805 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1806 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1807 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1808 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1809 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1810 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1811 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1812 }
1813 /*
1814 * The BD ring replenish thresholds control how often the
1815 * hardware fetches new BD's from the producer rings in host
1816 * memory. Setting the value too low on a busy system can
1817 * starve the hardware and reduce the throughput.
1818 *
1819 * Set the BD ring replenish thresholds. The recommended
1820 * values are 1/8th the number of descriptors allocated to
1821 * each ring.
1822 * XXX The 5754 requires a lower threshold, so it might be a
1823 * requirement of all 575x family chips. The Linux driver sets
1824 * the lower threshold for all 5705 family chips as well, but there
1825 * are reports that it might not need to be so strict.
1826 *
1827 * XXX Linux does some extra fiddling here for the 5906 parts as
1828 * well.
1829 */
1830 if (BGE_IS_5705_PLUS(sc))
1831 val = 8;
1832 else
1833 val = BGE_STD_RX_RING_CNT / 8;
1834 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1835 if (BGE_IS_JUMBO_CAPABLE(sc))
1836 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1837 BGE_JUMBO_RX_RING_CNT/8);
1838 if (BGE_IS_5717_PLUS(sc)) {
1839 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1840 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1841 }
1842
1843 /*
1844 * Disable all send rings by setting the 'ring disabled' bit
1845 * in the flags field of all the TX send ring control blocks,
1846 * located in NIC memory.
1847 */
1848 if (!BGE_IS_5705_PLUS(sc))
1849 /* 5700 to 5704 had 16 send rings. */
1850 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1851 else
1852 limit = 1;
1853 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1854 for (i = 0; i < limit; i++) {
1855 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1856 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1857 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1858 vrcb += sizeof(struct bge_rcb);
1859 }
1860
1861 /* Configure send ring RCB 0 (we use only the first ring) */
1862 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1863 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1864 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1865 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1866 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1844 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1867 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1868 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1845 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1846 else
1847 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1848 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1849 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1850 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1851
1852 /*
1853 * Disable all receive return rings by setting the
1854 * 'ring disabled' bit in the flags field of all the receive
1855 * return ring control blocks, located in NIC memory.
1856 */
1857 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1869 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1870 else
1871 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1872 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1873 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1874 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1875
1876 /*
1877 * Disable all receive return rings by setting the
1878 * 'ring disabled' bit in the flags field of all the receive
1879 * return ring control blocks, located in NIC memory.
1880 */
1881 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1858 sc->bge_asicrev == BGE_ASICREV_BCM5719) {
1882 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1883 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
1859 /* Should be 17, use 16 until we get an SRAM map. */
1860 limit = 16;
1861 } else if (!BGE_IS_5705_PLUS(sc))
1862 limit = BGE_RX_RINGS_MAX;
1863 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1864 sc->bge_asicrev == BGE_ASICREV_BCM57765)
1865 limit = 4;
1866 else
1867 limit = 1;
1868 /* Disable all receive return rings. */
1869 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1870 for (i = 0; i < limit; i++) {
1871 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1872 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1873 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1874 BGE_RCB_FLAG_RING_DISABLED);
1875 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1876 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1877 (i * (sizeof(uint64_t))), 0);
1878 vrcb += sizeof(struct bge_rcb);
1879 }
1880
1881 /*
1882 * Set up receive return ring 0. Note that the NIC address
1883 * for RX return rings is 0x0. The return rings live entirely
1884 * within the host, so the nicaddr field in the RCB isn't used.
1885 */
1886 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1887 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1888 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1889 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1890 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1891 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1892 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1893
1894 /* Set random backoff seed for TX */
1895 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1896 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1897 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1898 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1899 BGE_TX_BACKOFF_SEED_MASK);
1900
1901 /* Set inter-packet gap */
1884 /* Should be 17, use 16 until we get an SRAM map. */
1885 limit = 16;
1886 } else if (!BGE_IS_5705_PLUS(sc))
1887 limit = BGE_RX_RINGS_MAX;
1888 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1889 sc->bge_asicrev == BGE_ASICREV_BCM57765)
1890 limit = 4;
1891 else
1892 limit = 1;
1893 /* Disable all receive return rings. */
1894 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1895 for (i = 0; i < limit; i++) {
1896 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1897 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1898 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1899 BGE_RCB_FLAG_RING_DISABLED);
1900 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1901 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1902 (i * (sizeof(uint64_t))), 0);
1903 vrcb += sizeof(struct bge_rcb);
1904 }
1905
1906 /*
1907 * Set up receive return ring 0. Note that the NIC address
1908 * for RX return rings is 0x0. The return rings live entirely
1909 * within the host, so the nicaddr field in the RCB isn't used.
1910 */
1911 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1912 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1913 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1914 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1915 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1916 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1917 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1918
1919 /* Set random backoff seed for TX */
1920 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1921 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1922 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1923 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1924 BGE_TX_BACKOFF_SEED_MASK);
1925
1926 /* Set inter-packet gap */
1902 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1927 val = 0x2620;
1928 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
1929 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
1930 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
1931 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1903
1904 /*
1905 * Specify which ring to use for packets that don't match
1906 * any RX rules.
1907 */
1908 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1909
1910 /*
1911 * Configure number of RX lists. One interrupt distribution
1912 * list, sixteen active lists, one bad frames class.
1913 */
1914 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1915
1916 /* Initialize RX list placement stats mask. */
1917 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1918 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1919
1920 /* Disable host coalescing until we get it set up */
1921 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1922
1923 /* Poll to make sure it's shut down. */
1924 for (i = 0; i < BGE_TIMEOUT; i++) {
1925 DELAY(10);
1926 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1927 break;
1928 }
1929
1930 if (i == BGE_TIMEOUT) {
1931 device_printf(sc->bge_dev,
1932 "host coalescing engine failed to idle\n");
1933 return (ENXIO);
1934 }
1935
1936 /* Set up host coalescing defaults */
1937 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1938 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1939 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1940 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1941 if (!(BGE_IS_5705_PLUS(sc))) {
1942 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1943 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1944 }
1945 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1946 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1947
1948 /* Set up address of statistics block */
1949 if (!(BGE_IS_5705_PLUS(sc))) {
1950 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1951 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1952 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1953 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1954 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1955 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1956 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1957 }
1958
1959 /* Set up address of status block */
1960 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1961 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1962 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1963 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1964
1965 /* Set up status block size. */
1966 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1967 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1968 val = BGE_STATBLKSZ_FULL;
1969 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1970 } else {
1971 val = BGE_STATBLKSZ_32BYTE;
1972 bzero(sc->bge_ldata.bge_status_block, 32);
1973 }
1974 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1975 sc->bge_cdata.bge_status_map,
1976 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1977
1978 /* Turn on host coalescing state machine */
1979 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1980
1981 /* Turn on RX BD completion state machine and enable attentions */
1982 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1983 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1984
1985 /* Turn on RX list placement state machine */
1986 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1987
1988 /* Turn on RX list selector state machine. */
1989 if (!(BGE_IS_5705_PLUS(sc)))
1990 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1991
1992 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1993 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1994 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1995 BGE_MACMODE_FRMHDR_DMA_ENB;
1996
1997 if (sc->bge_flags & BGE_FLAG_TBI)
1998 val |= BGE_PORTMODE_TBI;
1999 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
2000 val |= BGE_PORTMODE_GMII;
2001 else
2002 val |= BGE_PORTMODE_MII;
2003
2004 /* Turn on DMA, clear stats */
2005 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2006
2007 /* Set misc. local control, enable interrupts on attentions */
2008 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2009
2010#ifdef notdef
2011 /* Assert GPIO pins for PHY reset */
2012 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2013 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2014 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2015 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2016#endif
2017
2018 /* Turn on DMA completion state machine */
2019 if (!(BGE_IS_5705_PLUS(sc)))
2020 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2021
2022 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2023
2024 /* Enable host coalescing bug fix. */
2025 if (BGE_IS_5755_PLUS(sc))
2026 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2027
2028 /* Request larger DMA burst size to get better performance. */
2029 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2030 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2031
2032 /* Turn on write DMA state machine */
2033 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2034 DELAY(40);
2035
2036 /* Turn on read DMA state machine */
2037 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2038
2039 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2040 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2041
2042 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2043 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2044 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2045 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2046 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2047 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2048 if (sc->bge_flags & BGE_FLAG_PCIE)
2049 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2050 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2051 val |= BGE_RDMAMODE_TSO4_ENABLE;
2052 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2053 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2054 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2055 val |= BGE_RDMAMODE_TSO6_ENABLE;
2056 }
1932
1933 /*
1934 * Specify which ring to use for packets that don't match
1935 * any RX rules.
1936 */
1937 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1938
1939 /*
1940 * Configure number of RX lists. One interrupt distribution
1941 * list, sixteen active lists, one bad frames class.
1942 */
1943 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1944
1945 /* Initialize RX list placement stats mask. */
1946 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1947 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1948
1949 /* Disable host coalescing until we get it set up */
1950 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1951
1952 /* Poll to make sure it's shut down. */
1953 for (i = 0; i < BGE_TIMEOUT; i++) {
1954 DELAY(10);
1955 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1956 break;
1957 }
1958
1959 if (i == BGE_TIMEOUT) {
1960 device_printf(sc->bge_dev,
1961 "host coalescing engine failed to idle\n");
1962 return (ENXIO);
1963 }
1964
1965 /* Set up host coalescing defaults */
1966 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1967 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1968 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1969 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1970 if (!(BGE_IS_5705_PLUS(sc))) {
1971 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1972 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1973 }
1974 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1975 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1976
1977 /* Set up address of statistics block */
1978 if (!(BGE_IS_5705_PLUS(sc))) {
1979 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1980 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1981 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1982 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1983 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1984 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1985 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1986 }
1987
1988 /* Set up address of status block */
1989 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1990 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1991 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1992 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1993
1994 /* Set up status block size. */
1995 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1996 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1997 val = BGE_STATBLKSZ_FULL;
1998 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1999 } else {
2000 val = BGE_STATBLKSZ_32BYTE;
2001 bzero(sc->bge_ldata.bge_status_block, 32);
2002 }
2003 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2004 sc->bge_cdata.bge_status_map,
2005 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2006
2007 /* Turn on host coalescing state machine */
2008 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2009
2010 /* Turn on RX BD completion state machine and enable attentions */
2011 CSR_WRITE_4(sc, BGE_RBDC_MODE,
2012 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
2013
2014 /* Turn on RX list placement state machine */
2015 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2016
2017 /* Turn on RX list selector state machine. */
2018 if (!(BGE_IS_5705_PLUS(sc)))
2019 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2020
2021 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2022 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2023 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2024 BGE_MACMODE_FRMHDR_DMA_ENB;
2025
2026 if (sc->bge_flags & BGE_FLAG_TBI)
2027 val |= BGE_PORTMODE_TBI;
2028 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
2029 val |= BGE_PORTMODE_GMII;
2030 else
2031 val |= BGE_PORTMODE_MII;
2032
2033 /* Turn on DMA, clear stats */
2034 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2035
2036 /* Set misc. local control, enable interrupts on attentions */
2037 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2038
2039#ifdef notdef
2040 /* Assert GPIO pins for PHY reset */
2041 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2042 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2043 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2044 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2045#endif
2046
2047 /* Turn on DMA completion state machine */
2048 if (!(BGE_IS_5705_PLUS(sc)))
2049 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2050
2051 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2052
2053 /* Enable host coalescing bug fix. */
2054 if (BGE_IS_5755_PLUS(sc))
2055 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2056
2057 /* Request larger DMA burst size to get better performance. */
2058 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2059 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2060
2061 /* Turn on write DMA state machine */
2062 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2063 DELAY(40);
2064
2065 /* Turn on read DMA state machine */
2066 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2067
2068 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2069 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2070
2071 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2072 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2073 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2074 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2075 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2076 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2077 if (sc->bge_flags & BGE_FLAG_PCIE)
2078 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2079 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2080 val |= BGE_RDMAMODE_TSO4_ENABLE;
2081 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2082 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2083 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2084 val |= BGE_RDMAMODE_TSO6_ENABLE;
2085 }
2086
2087 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
2088 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2089 BGE_RDMAMODE_H2BNC_VLAN_DET;
2090
2057 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2058 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2059 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2060 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2061 BGE_IS_5717_PLUS(sc)) {
2062 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
2063 /*
2064 * Adjust tx margin to prevent TX data corruption and
2065 * fix internal FIFO overflow.
2066 */
2091 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2092 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2093 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2094 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2095 BGE_IS_5717_PLUS(sc)) {
2096 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
2097 /*
2098 * Adjust tx margin to prevent TX data corruption and
2099 * fix internal FIFO overflow.
2100 */
2067 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2101 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2102 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2068 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2069 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2070 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2071 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2072 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2073 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2074 }
2075 /*
2076 * Enable fix for read DMA FIFO overruns.
2077 * The fix is to limit the number of RX BDs
2078 * the hardware would fetch at a time.
2079 */
2080 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
2081 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2082 }
2083
2103 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2104 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2105 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2106 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2107 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2108 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2109 }
2110 /*
2111 * Enable fix for read DMA FIFO overruns.
2112 * The fix is to limit the number of RX BDs
2113 * the hardware would fetch at a time.
2114 */
2115 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
2116 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2117 }
2118
2084 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2119 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2120 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2085 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2086 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2087 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2088 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2089 }
2090
2091 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2092 DELAY(40);
2093
2094 /* Turn on RX data completion state machine */
2095 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2096
2097 /* Turn on RX BD initiator state machine */
2098 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2099
2100 /* Turn on RX data and RX BD initiator state machine */
2101 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2102
2103 /* Turn on Mbuf cluster free state machine */
2104 if (!(BGE_IS_5705_PLUS(sc)))
2105 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2106
2107 /* Turn on send BD completion state machine */
2108 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2109
2110 /* Turn on send data completion state machine */
2111 val = BGE_SDCMODE_ENABLE;
2112 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2113 val |= BGE_SDCMODE_CDELAY;
2114 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2115
2116 /* Turn on send data initiator state machine */
2117 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2118 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2119 BGE_SDIMODE_HW_LSO_PRE_DMA);
2120 else
2121 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2122
2123 /* Turn on send BD initiator state machine */
2124 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2125
2126 /* Turn on send BD selector state machine */
2127 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2128
2129 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2130 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2131 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2132
2133 /* ack/clear link change events */
2134 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2135 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2136 BGE_MACSTAT_LINK_CHANGED);
2137 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2138
2139 /*
2140 * Enable attention when the link has changed state for
2141 * devices that use auto polling.
2142 */
2143 if (sc->bge_flags & BGE_FLAG_TBI) {
2144 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2145 } else {
2146 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2147 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2148 DELAY(80);
2149 }
2150 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2151 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2152 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2153 BGE_EVTENB_MI_INTERRUPT);
2154 }
2155
2156 /*
2157 * Clear any pending link state attention.
2158 * Otherwise some link state change events may be lost until attention
2159 * is cleared by bge_intr() -> bge_link_upd() sequence.
2160 * It's not necessary on newer BCM chips - perhaps enabling link
2161 * state change attentions implies clearing pending attention.
2162 */
2163 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2164 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2165 BGE_MACSTAT_LINK_CHANGED);
2166
2167 /* Enable link state change attentions. */
2168 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2169
2170 return (0);
2171}
2172
2173const struct bge_revision *
2174bge_lookup_rev(uint32_t chipid)
2175{
2176 const struct bge_revision *br;
2177
2178 for (br = bge_revisions; br->br_name != NULL; br++) {
2179 if (br->br_chipid == chipid)
2180 return (br);
2181 }
2182
2183 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2184 if (br->br_chipid == BGE_ASICREV(chipid))
2185 return (br);
2186 }
2187
2188 return (NULL);
2189}
2190
2191const struct bge_vendor *
2192bge_lookup_vendor(uint16_t vid)
2193{
2194 const struct bge_vendor *v;
2195
2196 for (v = bge_vendors; v->v_name != NULL; v++)
2197 if (v->v_id == vid)
2198 return (v);
2199
2200 panic("%s: unknown vendor %d", __func__, vid);
2201 return (NULL);
2202}
2203
2204/*
2205 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2206 * against our list and return its name if we find a match.
2207 *
2208 * Note that since the Broadcom controller contains VPD support, we
2209 * try to get the device name string from the controller itself instead
2210 * of the compiled-in string. It guarantees we'll always announce the
2211 * right product name. We fall back to the compiled-in string when
2212 * VPD is unavailable or corrupt.
2213 */
2214static int
2215bge_probe(device_t dev)
2216{
2217 char buf[96];
2218 char model[64];
2219 const struct bge_revision *br;
2220 const char *pname;
2221 struct bge_softc *sc = device_get_softc(dev);
2222 const struct bge_type *t = bge_devs;
2223 const struct bge_vendor *v;
2224 uint32_t id;
2225 uint16_t did, vid;
2226
2227 sc->bge_dev = dev;
2228 vid = pci_get_vendor(dev);
2229 did = pci_get_device(dev);
2230 while(t->bge_vid != 0) {
2231 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2232 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2233 BGE_PCIMISCCTL_ASICREV_SHIFT;
2234 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2235 /*
2236 * Find the ASIC revision. Different chips
2237 * use different registers.
2238 */
2239 switch (pci_get_device(dev)) {
2240 case BCOM_DEVICEID_BCM5717:
2241 case BCOM_DEVICEID_BCM5718:
2242 case BCOM_DEVICEID_BCM5719:
2121 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2122 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2123 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2124 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2125 }
2126
2127 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2128 DELAY(40);
2129
2130 /* Turn on RX data completion state machine */
2131 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2132
2133 /* Turn on RX BD initiator state machine */
2134 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2135
2136 /* Turn on RX data and RX BD initiator state machine */
2137 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2138
2139 /* Turn on Mbuf cluster free state machine */
2140 if (!(BGE_IS_5705_PLUS(sc)))
2141 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2142
2143 /* Turn on send BD completion state machine */
2144 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2145
2146 /* Turn on send data completion state machine */
2147 val = BGE_SDCMODE_ENABLE;
2148 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2149 val |= BGE_SDCMODE_CDELAY;
2150 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2151
2152 /* Turn on send data initiator state machine */
2153 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2154 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2155 BGE_SDIMODE_HW_LSO_PRE_DMA);
2156 else
2157 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2158
2159 /* Turn on send BD initiator state machine */
2160 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2161
2162 /* Turn on send BD selector state machine */
2163 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2164
2165 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2166 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2167 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2168
2169 /* ack/clear link change events */
2170 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2171 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2172 BGE_MACSTAT_LINK_CHANGED);
2173 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2174
2175 /*
2176 * Enable attention when the link has changed state for
2177 * devices that use auto polling.
2178 */
2179 if (sc->bge_flags & BGE_FLAG_TBI) {
2180 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2181 } else {
2182 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2183 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2184 DELAY(80);
2185 }
2186 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2187 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2188 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2189 BGE_EVTENB_MI_INTERRUPT);
2190 }
2191
2192 /*
2193 * Clear any pending link state attention.
2194 * Otherwise some link state change events may be lost until attention
2195 * is cleared by bge_intr() -> bge_link_upd() sequence.
2196 * It's not necessary on newer BCM chips - perhaps enabling link
2197 * state change attentions implies clearing pending attention.
2198 */
2199 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2200 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2201 BGE_MACSTAT_LINK_CHANGED);
2202
2203 /* Enable link state change attentions. */
2204 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2205
2206 return (0);
2207}
2208
2209const struct bge_revision *
2210bge_lookup_rev(uint32_t chipid)
2211{
2212 const struct bge_revision *br;
2213
2214 for (br = bge_revisions; br->br_name != NULL; br++) {
2215 if (br->br_chipid == chipid)
2216 return (br);
2217 }
2218
2219 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2220 if (br->br_chipid == BGE_ASICREV(chipid))
2221 return (br);
2222 }
2223
2224 return (NULL);
2225}
2226
2227const struct bge_vendor *
2228bge_lookup_vendor(uint16_t vid)
2229{
2230 const struct bge_vendor *v;
2231
2232 for (v = bge_vendors; v->v_name != NULL; v++)
2233 if (v->v_id == vid)
2234 return (v);
2235
2236 panic("%s: unknown vendor %d", __func__, vid);
2237 return (NULL);
2238}
2239
2240/*
2241 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2242 * against our list and return its name if we find a match.
2243 *
2244 * Note that since the Broadcom controller contains VPD support, we
2245 * try to get the device name string from the controller itself instead
2246 * of the compiled-in string. It guarantees we'll always announce the
2247 * right product name. We fall back to the compiled-in string when
2248 * VPD is unavailable or corrupt.
2249 */
2250static int
2251bge_probe(device_t dev)
2252{
2253 char buf[96];
2254 char model[64];
2255 const struct bge_revision *br;
2256 const char *pname;
2257 struct bge_softc *sc = device_get_softc(dev);
2258 const struct bge_type *t = bge_devs;
2259 const struct bge_vendor *v;
2260 uint32_t id;
2261 uint16_t did, vid;
2262
2263 sc->bge_dev = dev;
2264 vid = pci_get_vendor(dev);
2265 did = pci_get_device(dev);
2266 while(t->bge_vid != 0) {
2267 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2268 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2269 BGE_PCIMISCCTL_ASICREV_SHIFT;
2270 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2271 /*
2272 * Find the ASIC revision. Different chips
2273 * use different registers.
2274 */
2275 switch (pci_get_device(dev)) {
2276 case BCOM_DEVICEID_BCM5717:
2277 case BCOM_DEVICEID_BCM5718:
2278 case BCOM_DEVICEID_BCM5719:
2279 case BCOM_DEVICEID_BCM5720:
2243 id = pci_read_config(dev,
2244 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2245 break;
2246 case BCOM_DEVICEID_BCM57761:
2247 case BCOM_DEVICEID_BCM57765:
2248 case BCOM_DEVICEID_BCM57781:
2249 case BCOM_DEVICEID_BCM57785:
2250 case BCOM_DEVICEID_BCM57791:
2251 case BCOM_DEVICEID_BCM57795:
2252 id = pci_read_config(dev,
2253 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2254 break;
2255 default:
2256 id = pci_read_config(dev,
2257 BGE_PCI_PRODID_ASICREV, 4);
2258 }
2259 }
2260 br = bge_lookup_rev(id);
2261 v = bge_lookup_vendor(vid);
2262 if (bge_has_eaddr(sc) &&
2263 pci_get_vpd_ident(dev, &pname) == 0)
2264 snprintf(model, 64, "%s", pname);
2265 else
2266 snprintf(model, 64, "%s %s", v->v_name,
2267 br != NULL ? br->br_name :
2268 "NetXtreme Ethernet Controller");
2269 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2270 br != NULL ? "" : "unknown ", id);
2271 device_set_desc_copy(dev, buf);
2272 return (0);
2273 }
2274 t++;
2275 }
2276
2277 return (ENXIO);
2278}
2279
2280static void
2281bge_dma_free(struct bge_softc *sc)
2282{
2283 int i;
2284
2285 /* Destroy DMA maps for RX buffers. */
2286 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2287 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2288 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2289 sc->bge_cdata.bge_rx_std_dmamap[i]);
2290 }
2291 if (sc->bge_cdata.bge_rx_std_sparemap)
2292 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2293 sc->bge_cdata.bge_rx_std_sparemap);
2294
2295 /* Destroy DMA maps for jumbo RX buffers. */
2296 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2297 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2298 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2299 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2300 }
2301 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2302 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2303 sc->bge_cdata.bge_rx_jumbo_sparemap);
2304
2305 /* Destroy DMA maps for TX buffers. */
2306 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2307 if (sc->bge_cdata.bge_tx_dmamap[i])
2308 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2309 sc->bge_cdata.bge_tx_dmamap[i]);
2310 }
2311
2312 if (sc->bge_cdata.bge_rx_mtag)
2313 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2314 if (sc->bge_cdata.bge_tx_mtag)
2315 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2316
2317
2318 /* Destroy standard RX ring. */
2319 if (sc->bge_cdata.bge_rx_std_ring_map)
2320 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2321 sc->bge_cdata.bge_rx_std_ring_map);
2322 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2323 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2324 sc->bge_ldata.bge_rx_std_ring,
2325 sc->bge_cdata.bge_rx_std_ring_map);
2326
2327 if (sc->bge_cdata.bge_rx_std_ring_tag)
2328 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2329
2330 /* Destroy jumbo RX ring. */
2331 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2332 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2333 sc->bge_cdata.bge_rx_jumbo_ring_map);
2334
2335 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2336 sc->bge_ldata.bge_rx_jumbo_ring)
2337 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2338 sc->bge_ldata.bge_rx_jumbo_ring,
2339 sc->bge_cdata.bge_rx_jumbo_ring_map);
2340
2341 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2342 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2343
2344 /* Destroy RX return ring. */
2345 if (sc->bge_cdata.bge_rx_return_ring_map)
2346 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2347 sc->bge_cdata.bge_rx_return_ring_map);
2348
2349 if (sc->bge_cdata.bge_rx_return_ring_map &&
2350 sc->bge_ldata.bge_rx_return_ring)
2351 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2352 sc->bge_ldata.bge_rx_return_ring,
2353 sc->bge_cdata.bge_rx_return_ring_map);
2354
2355 if (sc->bge_cdata.bge_rx_return_ring_tag)
2356 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2357
2358 /* Destroy TX ring. */
2359 if (sc->bge_cdata.bge_tx_ring_map)
2360 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2361 sc->bge_cdata.bge_tx_ring_map);
2362
2363 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2364 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2365 sc->bge_ldata.bge_tx_ring,
2366 sc->bge_cdata.bge_tx_ring_map);
2367
2368 if (sc->bge_cdata.bge_tx_ring_tag)
2369 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2370
2371 /* Destroy status block. */
2372 if (sc->bge_cdata.bge_status_map)
2373 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2374 sc->bge_cdata.bge_status_map);
2375
2376 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2377 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2378 sc->bge_ldata.bge_status_block,
2379 sc->bge_cdata.bge_status_map);
2380
2381 if (sc->bge_cdata.bge_status_tag)
2382 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2383
2384 /* Destroy statistics block. */
2385 if (sc->bge_cdata.bge_stats_map)
2386 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2387 sc->bge_cdata.bge_stats_map);
2388
2389 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2390 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2391 sc->bge_ldata.bge_stats,
2392 sc->bge_cdata.bge_stats_map);
2393
2394 if (sc->bge_cdata.bge_stats_tag)
2395 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2396
2397 if (sc->bge_cdata.bge_buffer_tag)
2398 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2399
2400 /* Destroy the parent tag. */
2401 if (sc->bge_cdata.bge_parent_tag)
2402 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2403}
2404
2405static int
2406bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2407 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2408 bus_addr_t *paddr, const char *msg)
2409{
2410 struct bge_dmamap_arg ctx;
2411 bus_addr_t lowaddr;
2412 bus_size_t ring_end;
2413 int error;
2414
2415 lowaddr = BUS_SPACE_MAXADDR;
2416again:
2417 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2418 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2419 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2420 if (error != 0) {
2421 device_printf(sc->bge_dev,
2422 "could not create %s dma tag\n", msg);
2423 return (ENOMEM);
2424 }
2425 /* Allocate DMA'able memory for ring. */
2426 error = bus_dmamem_alloc(*tag, (void **)ring,
2427 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2428 if (error != 0) {
2429 device_printf(sc->bge_dev,
2430 "could not allocate DMA'able memory for %s\n", msg);
2431 return (ENOMEM);
2432 }
2433 /* Load the address of the ring. */
2434 ctx.bge_busaddr = 0;
2435 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2436 &ctx, BUS_DMA_NOWAIT);
2437 if (error != 0) {
2438 device_printf(sc->bge_dev,
2439 "could not load DMA'able memory for %s\n", msg);
2440 return (ENOMEM);
2441 }
2442 *paddr = ctx.bge_busaddr;
2443 ring_end = *paddr + maxsize;
2444 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2445 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2446 /*
2447 * 4GB boundary crossed. Limit maximum allowable DMA
2448 * address space to 32bit and try again.
2449 */
2450 bus_dmamap_unload(*tag, *map);
2451 bus_dmamem_free(*tag, *ring, *map);
2452 bus_dma_tag_destroy(*tag);
2453 if (bootverbose)
2454 device_printf(sc->bge_dev, "4GB boundary crossed, "
2455 "limit DMA address space to 32bit for %s\n", msg);
2456 *ring = NULL;
2457 *tag = NULL;
2458 *map = NULL;
2459 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2460 goto again;
2461 }
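	/*
	 * Example of the boundary check above (assuming BGE_ADDR_HI()
	 * yields the upper 32 bits of a bus address): a ring loaded at
	 * 0xfffff000 with maxsize 0x2000 ends at 0x100001000, so the
	 * high words of start (0) and end (1) differ, and on affected
	 * chips the allocation is retried below the 4GB boundary.
	 */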
2462 return (0);
2463}
2464
2465static int
2466bge_dma_alloc(struct bge_softc *sc)
2467{
2468 bus_addr_t lowaddr;
2469 bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
2470 int i, error;
2471
2472 lowaddr = BUS_SPACE_MAXADDR;
2473 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2474 lowaddr = BGE_DMA_MAXADDR;
2475 /*
2476 * Allocate the parent bus DMA tag appropriate for PCI.
2477 */
2478 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2479 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2480 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2481 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2482 if (error != 0) {
2483 device_printf(sc->bge_dev,
2484 "could not allocate parent dma tag\n");
2485 return (ENOMEM);
2486 }
2487
2488 /* Create tag for standard RX ring. */
2489 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2490 &sc->bge_cdata.bge_rx_std_ring_tag,
2491 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2492 &sc->bge_cdata.bge_rx_std_ring_map,
2493 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2494 if (error)
2495 return (error);
2496
2497 /* Create tag for RX return ring. */
2498 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2499 &sc->bge_cdata.bge_rx_return_ring_tag,
2500 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2501 &sc->bge_cdata.bge_rx_return_ring_map,
2502 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2503 if (error)
2504 return (error);
2505
2506 /* Create tag for TX ring. */
2507 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2508 &sc->bge_cdata.bge_tx_ring_tag,
2509 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2510 &sc->bge_cdata.bge_tx_ring_map,
2511 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2512 if (error)
2513 return (error);
2514
2515 /*
2516 * Create tag for status block.
2517 * Because we only use a single Tx/Rx/Rx return ring, use the
2518 * minimum status block size, except for BCM5700 AX/BX, which
2519 * seems to want to see the full status block size regardless
2520 * of the configured number of rings.
2521 */
2522 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2523 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2524 sbsz = BGE_STATUS_BLK_SZ;
2525 else
2526 sbsz = 32;
2527 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2528 &sc->bge_cdata.bge_status_tag,
2529 (uint8_t **)&sc->bge_ldata.bge_status_block,
2530 &sc->bge_cdata.bge_status_map,
2531 &sc->bge_ldata.bge_status_block_paddr, "status block");
2532 if (error)
2533 return (error);
2534
2535 /* Create tag for statistics block. */
2536 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2537 &sc->bge_cdata.bge_stats_tag,
2538 (uint8_t **)&sc->bge_ldata.bge_stats,
2539 &sc->bge_cdata.bge_stats_map,
2540 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2541 if (error)
2542 return (error);
2543
2544 /* Create tag for jumbo RX ring. */
2545 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2546 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2547 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2548 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2549 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2550 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2551 if (error)
2552 return (error);
2553 }
2554
2555 /* Create parent tag for buffers. */
2556 boundary = 0;
2557 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2558 boundary = BGE_DMA_BNDRY;
2559 /*
2560 * XXX
2561 * A watchdog timeout issue was observed on BCM5704, which
2562 * lives behind a PCI-X bridge (e.g. the AMD 8131 PCI-X bridge).
2563 * Limiting DMA address space to 32bits seems to address
2564 * it.
2565 */
2566 if (sc->bge_flags & BGE_FLAG_PCIX)
2567 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2568 }
2569 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2570 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2571 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2572 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2573 if (error != 0) {
2574 device_printf(sc->bge_dev,
2575 "could not allocate buffer dma tag\n");
2576 return (ENOMEM);
2577 }
2578 /* Create tag for Tx mbufs. */
2579 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2580 txsegsz = BGE_TSOSEG_SZ;
2581 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2582 } else {
2583 txsegsz = MCLBYTES;
2584 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2585 }
2586 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2587 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2588 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2589 &sc->bge_cdata.bge_tx_mtag);
2590
2591 if (error) {
2592 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2593 return (ENOMEM);
2594 }
2595
2596 /* Create tag for Rx mbufs. */
2597 if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
2598 rxmaxsegsz = MJUM9BYTES;
2599 else
2600 rxmaxsegsz = MCLBYTES;
2601 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2602 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
2603 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2604
2605 if (error) {
2606 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2607 return (ENOMEM);
2608 }
2609
2610 /* Create DMA maps for RX buffers. */
2611 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2612 &sc->bge_cdata.bge_rx_std_sparemap);
2613 if (error) {
2614 device_printf(sc->bge_dev,
2615 "can't create spare DMA map for RX\n");
2616 return (ENOMEM);
2617 }
2618 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2619 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2620 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2621 if (error) {
2622 device_printf(sc->bge_dev,
2623 "can't create DMA map for RX\n");
2624 return (ENOMEM);
2625 }
2626 }
2627
2628 /* Create DMA maps for TX buffers. */
2629 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2630 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2631 &sc->bge_cdata.bge_tx_dmamap[i]);
2632 if (error) {
2633 device_printf(sc->bge_dev,
2634 "can't create DMA map for TX\n");
2635 return (ENOMEM);
2636 }
2637 }
2638
2639 /* Create tags for jumbo RX buffers. */
2640 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2641 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2642 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2643 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2644 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2645 if (error) {
2646 device_printf(sc->bge_dev,
2647 "could not allocate jumbo dma tag\n");
2648 return (ENOMEM);
2649 }
2650 /* Create DMA maps for jumbo RX buffers. */
2651 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2652 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2653 if (error) {
2654 device_printf(sc->bge_dev,
2655 "can't create spare DMA map for jumbo RX\n");
2656 return (ENOMEM);
2657 }
2658 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2659 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2660 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2661 if (error) {
2662 device_printf(sc->bge_dev,
2663 "can't create DMA map for jumbo RX\n");
2664 return (ENOMEM);
2665 }
2666 }
2667 }
2668
2669 return (0);
2670}
2671
2672/*
2673 * Return true if this device has more than one port.
2674 */
2675static int
2676bge_has_multiple_ports(struct bge_softc *sc)
2677{
2678 device_t dev = sc->bge_dev;
2679 u_int b, d, f, fscan, s;
2680
2681 d = pci_get_domain(dev);
2682 b = pci_get_bus(dev);
2683 s = pci_get_slot(dev);
2684 f = pci_get_function(dev);
2685 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2686 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2687 return (1);
2688 return (0);
2689}
2690
2691/*
2692 * Return true if MSI can be used with this device.
2693 */
2694static int
2695bge_can_use_msi(struct bge_softc *sc)
2696{
2697 int can_use_msi = 0;
2698
2699 /* Disable MSI for polling(4). */
2700#ifdef DEVICE_POLLING
2701 return (0);
2702#endif
2703 switch (sc->bge_asicrev) {
2704 case BGE_ASICREV_BCM5714_A0:
2705 case BGE_ASICREV_BCM5714:
2706 /*
2707 * Apparently, MSI doesn't work when these chips are
2708 * configured in single-port mode.
2709 */
2710 if (bge_has_multiple_ports(sc))
2711 can_use_msi = 1;
2712 break;
2713 case BGE_ASICREV_BCM5750:
2714 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2715 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2716 can_use_msi = 1;
2717 break;
2718 default:
2719 if (BGE_IS_575X_PLUS(sc))
2720 can_use_msi = 1;
2721 }
2722 return (can_use_msi);
2723}
2724
2725static int
2726bge_attach(device_t dev)
2727{
2728 struct ifnet *ifp;
2729 struct bge_softc *sc;
2730 uint32_t hwcfg = 0, misccfg;
2731 u_char eaddr[ETHER_ADDR_LEN];
2732 int capmask, error, f, msicount, phy_addr, reg, rid, trys;
2733
2734 sc = device_get_softc(dev);
2735 sc->bge_dev = dev;
2736
2737 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2738
2739 /*
2740 * Map control/status registers.
2741 */
2742 pci_enable_busmaster(dev);
2743
2744 rid = PCIR_BAR(0);
2745 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2746 RF_ACTIVE);
2747
2748 if (sc->bge_res == NULL) {
2749		device_printf(sc->bge_dev, "couldn't map memory\n");
2750 error = ENXIO;
2751 goto fail;
2752 }
2753
2754 /* Save various chip information. */
2755 sc->bge_chipid =
2756 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2757 BGE_PCIMISCCTL_ASICREV_SHIFT;
2758 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2759 /*
2760	 * Find the ASIC revision. Different chips use different
2761 * registers.
2762 */
2763 switch (pci_get_device(dev)) {
2764 case BCOM_DEVICEID_BCM5717:
2765 case BCOM_DEVICEID_BCM5718:
2766 case BCOM_DEVICEID_BCM5719:
2280 id = pci_read_config(dev,
2281 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2282 break;
2283 case BCOM_DEVICEID_BCM57761:
2284 case BCOM_DEVICEID_BCM57765:
2285 case BCOM_DEVICEID_BCM57781:
2286 case BCOM_DEVICEID_BCM57785:
2287 case BCOM_DEVICEID_BCM57791:
2288 case BCOM_DEVICEID_BCM57795:
2289 id = pci_read_config(dev,
2290 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2291 break;
2292 default:
2293 id = pci_read_config(dev,
2294 BGE_PCI_PRODID_ASICREV, 4);
2295 }
2296 }
2297 br = bge_lookup_rev(id);
2298 v = bge_lookup_vendor(vid);
2299 if (bge_has_eaddr(sc) &&
2300 pci_get_vpd_ident(dev, &pname) == 0)
2301 snprintf(model, 64, "%s", pname);
2302 else
2303 snprintf(model, 64, "%s %s", v->v_name,
2304 br != NULL ? br->br_name :
2305 "NetXtreme Ethernet Controller");
2306 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2307 br != NULL ? "" : "unknown ", id);
2308 device_set_desc_copy(dev, buf);
2309 return (0);
2310 }
2311 t++;
2312 }
2313
2314 return (ENXIO);
2315}
2316
2317static void
2318bge_dma_free(struct bge_softc *sc)
2319{
2320 int i;
2321
2322 /* Destroy DMA maps for RX buffers. */
2323 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2324 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2325 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2326 sc->bge_cdata.bge_rx_std_dmamap[i]);
2327 }
2328 if (sc->bge_cdata.bge_rx_std_sparemap)
2329 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2330 sc->bge_cdata.bge_rx_std_sparemap);
2331
2332 /* Destroy DMA maps for jumbo RX buffers. */
2333 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2334 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2335 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2336 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2337 }
2338 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2339 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2340 sc->bge_cdata.bge_rx_jumbo_sparemap);
2341
2342 /* Destroy DMA maps for TX buffers. */
2343 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2344 if (sc->bge_cdata.bge_tx_dmamap[i])
2345 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2346 sc->bge_cdata.bge_tx_dmamap[i]);
2347 }
2348
2349 if (sc->bge_cdata.bge_rx_mtag)
2350 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2351 if (sc->bge_cdata.bge_tx_mtag)
2352 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2353
2354
2355 /* Destroy standard RX ring. */
2356 if (sc->bge_cdata.bge_rx_std_ring_map)
2357 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2358 sc->bge_cdata.bge_rx_std_ring_map);
2359 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2360 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2361 sc->bge_ldata.bge_rx_std_ring,
2362 sc->bge_cdata.bge_rx_std_ring_map);
2363
2364 if (sc->bge_cdata.bge_rx_std_ring_tag)
2365 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2366
2367 /* Destroy jumbo RX ring. */
2368 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2369 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2370 sc->bge_cdata.bge_rx_jumbo_ring_map);
2371
2372 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2373 sc->bge_ldata.bge_rx_jumbo_ring)
2374 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2375 sc->bge_ldata.bge_rx_jumbo_ring,
2376 sc->bge_cdata.bge_rx_jumbo_ring_map);
2377
2378 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2379 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2380
2381 /* Destroy RX return ring. */
2382 if (sc->bge_cdata.bge_rx_return_ring_map)
2383 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2384 sc->bge_cdata.bge_rx_return_ring_map);
2385
2386 if (sc->bge_cdata.bge_rx_return_ring_map &&
2387 sc->bge_ldata.bge_rx_return_ring)
2388 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2389 sc->bge_ldata.bge_rx_return_ring,
2390 sc->bge_cdata.bge_rx_return_ring_map);
2391
2392 if (sc->bge_cdata.bge_rx_return_ring_tag)
2393 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2394
2395 /* Destroy TX ring. */
2396 if (sc->bge_cdata.bge_tx_ring_map)
2397 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2398 sc->bge_cdata.bge_tx_ring_map);
2399
2400 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2401 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2402 sc->bge_ldata.bge_tx_ring,
2403 sc->bge_cdata.bge_tx_ring_map);
2404
2405 if (sc->bge_cdata.bge_tx_ring_tag)
2406 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2407
2408 /* Destroy status block. */
2409 if (sc->bge_cdata.bge_status_map)
2410 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2411 sc->bge_cdata.bge_status_map);
2412
2413 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2414 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2415 sc->bge_ldata.bge_status_block,
2416 sc->bge_cdata.bge_status_map);
2417
2418 if (sc->bge_cdata.bge_status_tag)
2419 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2420
2421 /* Destroy statistics block. */
2422 if (sc->bge_cdata.bge_stats_map)
2423 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2424 sc->bge_cdata.bge_stats_map);
2425
2426 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2427 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2428 sc->bge_ldata.bge_stats,
2429 sc->bge_cdata.bge_stats_map);
2430
2431 if (sc->bge_cdata.bge_stats_tag)
2432 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2433
2434 if (sc->bge_cdata.bge_buffer_tag)
2435 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2436
2437 /* Destroy the parent tag. */
2438 if (sc->bge_cdata.bge_parent_tag)
2439 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2440}
2441
2442static int
2443bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2444 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2445 bus_addr_t *paddr, const char *msg)
2446{
2447 struct bge_dmamap_arg ctx;
2448 bus_addr_t lowaddr;
2449 bus_size_t ring_end;
2450 int error;
2451
2452 lowaddr = BUS_SPACE_MAXADDR;
2453again:
2454 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2455 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2456 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2457 if (error != 0) {
2458 device_printf(sc->bge_dev,
2459 "could not create %s dma tag\n", msg);
2460 return (ENOMEM);
2461 }
2462 /* Allocate DMA'able memory for ring. */
2463 error = bus_dmamem_alloc(*tag, (void **)ring,
2464 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2465 if (error != 0) {
2466 device_printf(sc->bge_dev,
2467 "could not allocate DMA'able memory for %s\n", msg);
2468 return (ENOMEM);
2469 }
2470 /* Load the address of the ring. */
2471 ctx.bge_busaddr = 0;
2472 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2473 &ctx, BUS_DMA_NOWAIT);
2474 if (error != 0) {
2475 device_printf(sc->bge_dev,
2476 "could not load DMA'able memory for %s\n", msg);
2477 return (ENOMEM);
2478 }
2479 *paddr = ctx.bge_busaddr;
2480 ring_end = *paddr + maxsize;
2481 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2482 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2483 /*
2484 * 4GB boundary crossed. Limit maximum allowable DMA
2485 * address space to 32bit and try again.
2486 */
2487 bus_dmamap_unload(*tag, *map);
2488 bus_dmamem_free(*tag, *ring, *map);
2489 bus_dma_tag_destroy(*tag);
2490 if (bootverbose)
2491 device_printf(sc->bge_dev, "4GB boundary crossed, "
2492 "limit DMA address space to 32bit for %s\n", msg);
2493 *ring = NULL;
2494 *tag = NULL;
2495 *map = NULL;
2496 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2497 goto again;
2498 }
2499 return (0);
2500}
2501
2502static int
2503bge_dma_alloc(struct bge_softc *sc)
2504{
2505 bus_addr_t lowaddr;
2506 bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
2507 int i, error;
2508
2509 lowaddr = BUS_SPACE_MAXADDR;
2510 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2511 lowaddr = BGE_DMA_MAXADDR;
2512 /*
2513 * Allocate the parent bus DMA tag appropriate for PCI.
2514 */
2515 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2516 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2517 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2518 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2519 if (error != 0) {
2520 device_printf(sc->bge_dev,
2521 "could not allocate parent dma tag\n");
2522 return (ENOMEM);
2523 }
2524
2525 /* Create tag for standard RX ring. */
2526 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2527 &sc->bge_cdata.bge_rx_std_ring_tag,
2528 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2529 &sc->bge_cdata.bge_rx_std_ring_map,
2530 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2531 if (error)
2532 return (error);
2533
2534 /* Create tag for RX return ring. */
2535 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2536 &sc->bge_cdata.bge_rx_return_ring_tag,
2537 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2538 &sc->bge_cdata.bge_rx_return_ring_map,
2539 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2540 if (error)
2541 return (error);
2542
2543 /* Create tag for TX ring. */
2544 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2545 &sc->bge_cdata.bge_tx_ring_tag,
2546 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2547 &sc->bge_cdata.bge_tx_ring_map,
2548 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2549 if (error)
2550 return (error);
2551
2552 /*
2553 * Create tag for status block.
2554	 * Because we only use a single TX/RX/RX return ring, use the
2555	 * minimum status block size, except on BCM5700 AX/BX, which
2556	 * seems to want to see the full status block size regardless
2557	 * of the configured number of rings.
2558 */
2559 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2560 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2561 sbsz = BGE_STATUS_BLK_SZ;
2562 else
2563 sbsz = 32;
2564 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2565 &sc->bge_cdata.bge_status_tag,
2566 (uint8_t **)&sc->bge_ldata.bge_status_block,
2567 &sc->bge_cdata.bge_status_map,
2568 &sc->bge_ldata.bge_status_block_paddr, "status block");
2569 if (error)
2570 return (error);
2571
2572 /* Create tag for statistics block. */
2573 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2574 &sc->bge_cdata.bge_stats_tag,
2575 (uint8_t **)&sc->bge_ldata.bge_stats,
2576 &sc->bge_cdata.bge_stats_map,
2577 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2578 if (error)
2579 return (error);
2580
2581 /* Create tag for jumbo RX ring. */
2582 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2583 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2584 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2585 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2586 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2587 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2588 if (error)
2589 return (error);
2590 }
2591
2592 /* Create parent tag for buffers. */
2593 boundary = 0;
2594 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2595 boundary = BGE_DMA_BNDRY;
2596 /*
2597 * XXX
2598	 * A watchdog timeout issue was observed on BCM5704 devices that
2599	 * live behind a PCI-X bridge (e.g. the AMD 8131 PCI-X bridge).
2600	 * Limiting the DMA address space to 32 bits seems to address
2601	 * it.
2602 */
2603 if (sc->bge_flags & BGE_FLAG_PCIX)
2604 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2605 }
2606 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2607 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2608 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2609 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2610 if (error != 0) {
2611 device_printf(sc->bge_dev,
2612 "could not allocate buffer dma tag\n");
2613 return (ENOMEM);
2614 }
2615 /* Create tag for Tx mbufs. */
2616 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
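		/*
		 * The TSO maximum transfer size below is presumably bounded by
		 * the 16-bit IP total-length field (65535 bytes) plus the
		 * Ethernet/VLAN header, which is not counted in the IP length.
		 */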
2617 txsegsz = BGE_TSOSEG_SZ;
2618 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2619 } else {
2620 txsegsz = MCLBYTES;
2621 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2622 }
2623 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2624 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2625 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2626 &sc->bge_cdata.bge_tx_mtag);
2627
2628 if (error) {
2629 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2630 return (ENOMEM);
2631 }
2632
2633 /* Create tag for Rx mbufs. */
2634 if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
2635 rxmaxsegsz = MJUM9BYTES;
2636 else
2637 rxmaxsegsz = MCLBYTES;
2638 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2639 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
2640 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2641
2642 if (error) {
2643 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2644 return (ENOMEM);
2645 }
2646
2647 /* Create DMA maps for RX buffers. */
2648 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2649 &sc->bge_cdata.bge_rx_std_sparemap);
2650 if (error) {
2651 device_printf(sc->bge_dev,
2652 "can't create spare DMA map for RX\n");
2653 return (ENOMEM);
2654 }
2655 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2656 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2657 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2658 if (error) {
2659 device_printf(sc->bge_dev,
2660 "can't create DMA map for RX\n");
2661 return (ENOMEM);
2662 }
2663 }
2664
2665 /* Create DMA maps for TX buffers. */
2666 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2667 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2668 &sc->bge_cdata.bge_tx_dmamap[i]);
2669 if (error) {
2670 device_printf(sc->bge_dev,
2671 "can't create DMA map for TX\n");
2672 return (ENOMEM);
2673 }
2674 }
2675
2676 /* Create tags for jumbo RX buffers. */
2677 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2678 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2679 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2680 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2681 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2682 if (error) {
2683 device_printf(sc->bge_dev,
2684 "could not allocate jumbo dma tag\n");
2685 return (ENOMEM);
2686 }
2687 /* Create DMA maps for jumbo RX buffers. */
2688 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2689 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2690 if (error) {
2691 device_printf(sc->bge_dev,
2692 "can't create spare DMA map for jumbo RX\n");
2693 return (ENOMEM);
2694 }
2695 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2696 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2697 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2698 if (error) {
2699 device_printf(sc->bge_dev,
2700 "can't create DMA map for jumbo RX\n");
2701 return (ENOMEM);
2702 }
2703 }
2704 }
2705
2706 return (0);
2707}
2708
2709/*
2710 * Return true if this device has more than one port.
2711 */
2712static int
2713bge_has_multiple_ports(struct bge_softc *sc)
2714{
2715 device_t dev = sc->bge_dev;
2716 u_int b, d, f, fscan, s;
2717
2718 d = pci_get_domain(dev);
2719 b = pci_get_bus(dev);
2720 s = pci_get_slot(dev);
2721 f = pci_get_function(dev);
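	/*
	 * Scan the other PCI functions at the same domain/bus/slot; if any
	 * other function is present, this is a multi-port device.
	 */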
2722 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2723 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2724 return (1);
2725 return (0);
2726}
2727
2728/*
2729 * Return true if MSI can be used with this device.
2730 */
2731static int
2732bge_can_use_msi(struct bge_softc *sc)
2733{
2734 int can_use_msi = 0;
2735
2736 /* Disable MSI for polling(4). */
2737#ifdef DEVICE_POLLING
2738 return (0);
2739#endif
2740 switch (sc->bge_asicrev) {
2741 case BGE_ASICREV_BCM5714_A0:
2742 case BGE_ASICREV_BCM5714:
2743 /*
2744 * Apparently, MSI doesn't work when these chips are
2745 * configured in single-port mode.
2746 */
2747 if (bge_has_multiple_ports(sc))
2748 can_use_msi = 1;
2749 break;
2750 case BGE_ASICREV_BCM5750:
2751 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2752 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2753 can_use_msi = 1;
2754 break;
2755 default:
2756 if (BGE_IS_575X_PLUS(sc))
2757 can_use_msi = 1;
2758 }
2759 return (can_use_msi);
2760}
2761
2762static int
2763bge_attach(device_t dev)
2764{
2765 struct ifnet *ifp;
2766 struct bge_softc *sc;
2767 uint32_t hwcfg = 0, misccfg;
2768 u_char eaddr[ETHER_ADDR_LEN];
2769 int capmask, error, f, msicount, phy_addr, reg, rid, trys;
2770
2771 sc = device_get_softc(dev);
2772 sc->bge_dev = dev;
2773
2774 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
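	/*
	 * bge_intr_task presumably does the bulk of the interrupt handling
	 * when the fast-taskqueue/single-shot MSI path is used; that
	 * taskqueue is created near the end of attach.
	 */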
2775
2776 /*
2777 * Map control/status registers.
2778 */
2779 pci_enable_busmaster(dev);
2780
2781 rid = PCIR_BAR(0);
2782 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2783 RF_ACTIVE);
2784
2785 if (sc->bge_res == NULL) {
2786		device_printf(sc->bge_dev, "couldn't map memory\n");
2787 error = ENXIO;
2788 goto fail;
2789 }
2790
2791 /* Save various chip information. */
2792 sc->bge_chipid =
2793 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2794 BGE_PCIMISCCTL_ASICREV_SHIFT;
2795 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2796 /*
2797	 * Find the ASIC revision. Different chips use different
2798 * registers.
2799 */
2800 switch (pci_get_device(dev)) {
2801 case BCOM_DEVICEID_BCM5717:
2802 case BCOM_DEVICEID_BCM5718:
2803 case BCOM_DEVICEID_BCM5719:
2804 case BCOM_DEVICEID_BCM5720:
2767 sc->bge_chipid = pci_read_config(dev,
2768 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2769 break;
2770 case BCOM_DEVICEID_BCM57761:
2771 case BCOM_DEVICEID_BCM57765:
2772 case BCOM_DEVICEID_BCM57781:
2773 case BCOM_DEVICEID_BCM57785:
2774 case BCOM_DEVICEID_BCM57791:
2775 case BCOM_DEVICEID_BCM57795:
2776 sc->bge_chipid = pci_read_config(dev,
2777 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2778 break;
2779 default:
2780 sc->bge_chipid = pci_read_config(dev,
2781 BGE_PCI_PRODID_ASICREV, 4);
2782 }
2783 }
2784 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2785 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2786
2787 /* Set default PHY address. */
2788 phy_addr = 1;
2789 /*
2790 * PHY address mapping for various devices.
2791 *
2792 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2793 * ---------+-------+-------+-------+-------+
2794 * BCM57XX | 1 | X | X | X |
2795 * BCM5704 | 1 | X | 1 | X |
2796 * BCM5717 | 1 | 8 | 2 | 9 |
2797 * BCM5719 | 1 | 8 | 2 | 9 |
2805 sc->bge_chipid = pci_read_config(dev,
2806 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2807 break;
2808 case BCOM_DEVICEID_BCM57761:
2809 case BCOM_DEVICEID_BCM57765:
2810 case BCOM_DEVICEID_BCM57781:
2811 case BCOM_DEVICEID_BCM57785:
2812 case BCOM_DEVICEID_BCM57791:
2813 case BCOM_DEVICEID_BCM57795:
2814 sc->bge_chipid = pci_read_config(dev,
2815 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2816 break;
2817 default:
2818 sc->bge_chipid = pci_read_config(dev,
2819 BGE_PCI_PRODID_ASICREV, 4);
2820 }
2821 }
2822 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2823 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2824
2825 /* Set default PHY address. */
2826 phy_addr = 1;
2827 /*
2828 * PHY address mapping for various devices.
2829 *
2830 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2831 * ---------+-------+-------+-------+-------+
2832 * BCM57XX | 1 | X | X | X |
2833 * BCM5704 | 1 | X | 1 | X |
2834 * BCM5717 | 1 | 8 | 2 | 9 |
2835 * BCM5719 | 1 | 8 | 2 | 9 |
2836 * BCM5720 | 1 | 8 | 2 | 9 |
2798 *
2799 * Other addresses may respond but they are not
2800 * IEEE compliant PHYs and should be ignored.
2801 */
2802 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2837 *
2838 * Other addresses may respond but they are not
2839 * IEEE compliant PHYs and should be ignored.
2840 */
2841 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2803 sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2842 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2843 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2804 f = pci_get_function(dev);
2805 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2806 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2807 BGE_SGDIGSTS_IS_SERDES)
2808 phy_addr = f + 8;
2809 else
2810 phy_addr = f + 1;
2811 } else {
2812 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2813 BGE_CPMU_PHY_STRAP_IS_SERDES)
2814 phy_addr = f + 8;
2815 else
2816 phy_addr = f + 1;
2817 }
2818 }
2819
2820 /*
2821 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2822 * 5705 A0 and A1 chips.
2823 */
2824 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2825 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2826 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2827 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2828 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2829 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2830
2831 if (bge_has_eaddr(sc))
2832 sc->bge_flags |= BGE_FLAG_EADDR;
2833
2834 /* Save chipset family. */
2835 switch (sc->bge_asicrev) {
2836 case BGE_ASICREV_BCM5717:
2837 case BGE_ASICREV_BCM5719:
2844 f = pci_get_function(dev);
2845 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2846 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2847 BGE_SGDIGSTS_IS_SERDES)
2848 phy_addr = f + 8;
2849 else
2850 phy_addr = f + 1;
2851 } else {
2852 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2853 BGE_CPMU_PHY_STRAP_IS_SERDES)
2854 phy_addr = f + 8;
2855 else
2856 phy_addr = f + 1;
2857 }
2858 }
2859
2860 /*
2861 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2862 * 5705 A0 and A1 chips.
2863 */
2864 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2865 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2866 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2867 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2868 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2869 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2870
2871 if (bge_has_eaddr(sc))
2872 sc->bge_flags |= BGE_FLAG_EADDR;
2873
2874 /* Save chipset family. */
2875 switch (sc->bge_asicrev) {
2876 case BGE_ASICREV_BCM5717:
2877 case BGE_ASICREV_BCM5719:
2878 case BGE_ASICREV_BCM5720:
2838 case BGE_ASICREV_BCM57765:
2839 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2840 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2841 BGE_FLAG_JUMBO_FRAME;
2842 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2843 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2844 /* Jumbo frame on BCM5719 A0 does not work. */
2845 sc->bge_flags &= ~BGE_FLAG_JUMBO;
2846 }
2847 break;
2848 case BGE_ASICREV_BCM5755:
2849 case BGE_ASICREV_BCM5761:
2850 case BGE_ASICREV_BCM5784:
2851 case BGE_ASICREV_BCM5785:
2852 case BGE_ASICREV_BCM5787:
2853 case BGE_ASICREV_BCM57780:
2854 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2855 BGE_FLAG_5705_PLUS;
2856 break;
2857 case BGE_ASICREV_BCM5700:
2858 case BGE_ASICREV_BCM5701:
2859 case BGE_ASICREV_BCM5703:
2860 case BGE_ASICREV_BCM5704:
2861 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2862 break;
2863 case BGE_ASICREV_BCM5714_A0:
2864 case BGE_ASICREV_BCM5780:
2865 case BGE_ASICREV_BCM5714:
2866 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
2867 /* FALLTHROUGH */
2868 case BGE_ASICREV_BCM5750:
2869 case BGE_ASICREV_BCM5752:
2870 case BGE_ASICREV_BCM5906:
2871 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2872 /* FALLTHROUGH */
2873 case BGE_ASICREV_BCM5705:
2874 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2875 break;
2876 }
2877
2878 /* Set various PHY bug flags. */
2879 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2880 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2881 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2882 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2883 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2884 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2885 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2886 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2887 if (pci_get_subvendor(dev) == DELL_VENDORID)
2888 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2889 if ((BGE_IS_5705_PLUS(sc)) &&
2890 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2891 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
2892 sc->bge_asicrev != BGE_ASICREV_BCM5719 &&
2879 case BGE_ASICREV_BCM57765:
2880 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2881 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2882 BGE_FLAG_JUMBO_FRAME;
2883 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2884 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2885 /* Jumbo frame on BCM5719 A0 does not work. */
2886 sc->bge_flags &= ~BGE_FLAG_JUMBO;
2887 }
2888 break;
2889 case BGE_ASICREV_BCM5755:
2890 case BGE_ASICREV_BCM5761:
2891 case BGE_ASICREV_BCM5784:
2892 case BGE_ASICREV_BCM5785:
2893 case BGE_ASICREV_BCM5787:
2894 case BGE_ASICREV_BCM57780:
2895 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2896 BGE_FLAG_5705_PLUS;
2897 break;
2898 case BGE_ASICREV_BCM5700:
2899 case BGE_ASICREV_BCM5701:
2900 case BGE_ASICREV_BCM5703:
2901 case BGE_ASICREV_BCM5704:
2902 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2903 break;
2904 case BGE_ASICREV_BCM5714_A0:
2905 case BGE_ASICREV_BCM5780:
2906 case BGE_ASICREV_BCM5714:
2907 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
2908 /* FALLTHROUGH */
2909 case BGE_ASICREV_BCM5750:
2910 case BGE_ASICREV_BCM5752:
2911 case BGE_ASICREV_BCM5906:
2912 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2913 /* FALLTHROUGH */
2914 case BGE_ASICREV_BCM5705:
2915 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2916 break;
2917 }
2918
2919 /* Set various PHY bug flags. */
2920 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2921 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2922 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2923 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2924 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2925 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2926 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2927 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2928 if (pci_get_subvendor(dev) == DELL_VENDORID)
2929 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2930 if ((BGE_IS_5705_PLUS(sc)) &&
2931 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2932 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
2933 sc->bge_asicrev != BGE_ASICREV_BCM5719 &&
2934 sc->bge_asicrev != BGE_ASICREV_BCM5720 &&
2893 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2894 sc->bge_asicrev != BGE_ASICREV_BCM57765 &&
2895 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2896 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2897 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2898 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2899 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2900 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2901 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2902 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2903 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2904 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2905 } else
2906 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2907 }
2908
2909	/* Identify the chips that use a CPMU. */
2910 if (BGE_IS_5717_PLUS(sc) ||
2911 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2912 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2913 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2914 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2915 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2916 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2917 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2918 else
2919 sc->bge_mi_mode = BGE_MIMODE_BASE;
2920 /* Enable auto polling for BCM570[0-5]. */
2921 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2922 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2923
2924 /*
2925	 * All Broadcom controllers have the 4GB boundary DMA bug.
2926	 * Whenever an address crosses a multiple of the 4GB boundary
2927	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2928	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2929	 * state machine will lock up and cause the device to hang.
2930 */
2931 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2932
2933 /* BCM5755 or higher and BCM5906 have short DMA bug. */
2934 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
2935 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2936
2937 /*
2938	 * BCM5719 cannot handle DMA requests for DMA segments that
2939	 * are larger than 4KB. However, the maximum DMA segment size
2940	 * created in the DMA tag is 4KB for TSO, so we should not
2941	 * encounter the issue here.
2942 */
2943 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
2944 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
2945
2946 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2947 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2948 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2949 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2950 sc->bge_flags |= BGE_FLAG_5788;
2951 }
2952
2953 capmask = BMSR_DEFCAPMASK;
2954 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
2955 (misccfg == 0x4000 || misccfg == 0x8000)) ||
2956 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2957 pci_get_vendor(dev) == BCOM_VENDORID &&
2958 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
2959 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
2960 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
2961 (pci_get_vendor(dev) == BCOM_VENDORID &&
2962 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
2963 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
2964 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
2965 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
2966 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2967 /* These chips are 10/100 only. */
2968 capmask &= ~BMSR_EXTSTAT;
2969 }
2970
2971 /*
2972	 * Some controllers seem to require special firmware to use
2973	 * TSO. But the firmware is not available to FreeBSD, and Linux
2974	 * claims that TSO performed by the firmware is slower than
2975	 * hardware-based TSO. Moreover, the firmware-based TSO has a
2976	 * known bug which makes it unable to handle TSO if the ethernet
2977	 * header + IP/TCP header is greater than 80 bytes. A workaround
2978	 * for the TSO bug exists, but it seems more expensive than not
2979	 * using TSO at all. Some hardware also has the TSO bug, so
2980	 * limit TSO to the controllers that are not affected by TSO
2981	 * issues (e.g. 5755 or higher).
2982 */
2983 if (BGE_IS_5717_PLUS(sc)) {
2984 /* BCM5717 requires different TSO configuration. */
2985 sc->bge_flags |= BGE_FLAG_TSO3;
2986 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2987 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2988 /* TSO on BCM5719 A0 does not work. */
2989 sc->bge_flags &= ~BGE_FLAG_TSO3;
2990 }
2991 } else if (BGE_IS_5755_PLUS(sc)) {
2992 /*
2993	 * BCM5754 and BCM5787 share the same ASIC id, so an
2994	 * explicit device id check is required.
2995	 * For unknown reasons, TSO does not work on BCM5755M.
2996 */
2997 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2998 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2999 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
3000 sc->bge_flags |= BGE_FLAG_TSO;
3001 }
3002
3003 /*
3004 * Check if this is a PCI-X or PCI Express device.
3005 */
3006 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
3007 /*
3008 * Found a PCI Express capabilities register, this
3009 * must be a PCI Express device.
3010 */
3011 sc->bge_flags |= BGE_FLAG_PCIE;
3012 sc->bge_expcap = reg;
2935 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2936 sc->bge_asicrev != BGE_ASICREV_BCM57765 &&
2937 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2938 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2939 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2940 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2941 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2942 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2943 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2944 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2945 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2946 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2947 } else
2948 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2949 }
2950
2951	/* Identify the chips that use a CPMU. */
2952 if (BGE_IS_5717_PLUS(sc) ||
2953 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2954 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2955 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2956 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2957 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2958 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2959 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2960 else
2961 sc->bge_mi_mode = BGE_MIMODE_BASE;
2962 /* Enable auto polling for BCM570[0-5]. */
2963 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2964 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2965
2966 /*
2967	 * All Broadcom controllers have the 4GB boundary DMA bug.
2968	 * Whenever an address crosses a multiple of the 4GB boundary
2969	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2970	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2971	 * state machine will lock up and cause the device to hang.
2972 */
2973 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2974
2975 /* BCM5755 or higher and BCM5906 have short DMA bug. */
2976 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
2977 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2978
2979 /*
2980	 * BCM5719 cannot handle DMA requests for DMA segments that
2981	 * are larger than 4KB. However, the maximum DMA segment size
2982	 * created in the DMA tag is 4KB for TSO, so we should not
2983	 * encounter the issue here.
2984 */
2985 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
2986 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
2987
2988 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2989 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2990 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2991 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2992 sc->bge_flags |= BGE_FLAG_5788;
2993 }
2994
2995 capmask = BMSR_DEFCAPMASK;
2996 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
2997 (misccfg == 0x4000 || misccfg == 0x8000)) ||
2998 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2999 pci_get_vendor(dev) == BCOM_VENDORID &&
3000 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
3001 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
3002 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
3003 (pci_get_vendor(dev) == BCOM_VENDORID &&
3004 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
3005 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
3006 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
3007 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
3008 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3009 /* These chips are 10/100 only. */
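		/*
		 * Clearing BMSR_EXTSTAT from the capability mask passed to
		 * mii_attach() below should keep the MII layer from probing
		 * gigabit (extended-status) media on these parts.
		 */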
3010 capmask &= ~BMSR_EXTSTAT;
3011 }
3012
3013 /*
3014	 * Some controllers seem to require special firmware to use
3015	 * TSO. But the firmware is not available to FreeBSD, and Linux
3016	 * claims that TSO performed by the firmware is slower than
3017	 * hardware-based TSO. Moreover, the firmware-based TSO has a
3018	 * known bug which makes it unable to handle TSO if the ethernet
3019	 * header + IP/TCP header is greater than 80 bytes. A workaround
3020	 * for the TSO bug exists, but it seems more expensive than not
3021	 * using TSO at all. Some hardware also has the TSO bug, so
3022	 * limit TSO to the controllers that are not affected by TSO
3023	 * issues (e.g. 5755 or higher).
3024 */
3025 if (BGE_IS_5717_PLUS(sc)) {
3026 /* BCM5717 requires different TSO configuration. */
3027 sc->bge_flags |= BGE_FLAG_TSO3;
3028 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
3029 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3030 /* TSO on BCM5719 A0 does not work. */
3031 sc->bge_flags &= ~BGE_FLAG_TSO3;
3032 }
3033 } else if (BGE_IS_5755_PLUS(sc)) {
3034 /*
3035	 * BCM5754 and BCM5787 share the same ASIC id, so an
3036	 * explicit device id check is required.
3037	 * For unknown reasons, TSO does not work on BCM5755M.
3038 */
3039 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
3040 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
3041 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
3042 sc->bge_flags |= BGE_FLAG_TSO;
3043 }
3044
3045 /*
3046 * Check if this is a PCI-X or PCI Express device.
3047 */
3048 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
3049 /*
3050 * Found a PCI Express capabilities register, this
3051 * must be a PCI Express device.
3052 */
3053 sc->bge_flags |= BGE_FLAG_PCIE;
3054 sc->bge_expcap = reg;
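		/*
		 * BCM5719/BCM5720 apparently do not cope well with a 4096-byte
		 * maximum read request size, so cap it at 2048 for them; other
		 * PCIe devices are bumped to 4096.
		 */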
3013 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
3055 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3056 sc->bge_asicrev == BGE_ASICREV_BCM5720)
3014 pci_set_max_read_req(dev, 2048);
3015 else if (pci_get_max_read_req(dev) != 4096)
3016 pci_set_max_read_req(dev, 4096);
3017 } else {
3018 /*
3019 * Check if the device is in PCI-X Mode.
3020 * (This bit is not valid on PCI Express controllers.)
3021 */
3022 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
3023 sc->bge_pcixcap = reg;
3024 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3025 BGE_PCISTATE_PCI_BUSMODE) == 0)
3026 sc->bge_flags |= BGE_FLAG_PCIX;
3027 }
3028
3029 /*
3030 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3031 * not actually a MAC controller bug but an issue with the embedded
3032 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3033 */
3034 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3035 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3036 /*
3037 * Allocate the interrupt, using MSI if possible. These devices
3038 * support 8 MSI messages, but only the first one is used in
3039 * normal operation.
3040 */
3041 rid = 0;
3042 if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
3043 sc->bge_msicap = reg;
3044 if (bge_can_use_msi(sc)) {
3045 msicount = pci_msi_count(dev);
3046 if (msicount > 1)
3047 msicount = 1;
3048 } else
3049 msicount = 0;
3050 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
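			/*
			 * With MSI allocated, the interrupt is at resource
			 * ID 1; plain INTx uses resource ID 0 (the default
			 * rid set above).
			 */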
3051 rid = 1;
3052 sc->bge_flags |= BGE_FLAG_MSI;
3053 }
3054 }
3055
3056 /*
3057	 * All controllers except the BCM5700 support tagged status, but
3058	 * we use tagged status only for the MSI case on BCM5717. Otherwise
3059	 * MSI on BCM5717 does not work.
3060 */
3061#ifndef DEVICE_POLLING
3062 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3063 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3064#endif
3065
3066 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3067 RF_SHAREABLE | RF_ACTIVE);
3068
3069 if (sc->bge_irq == NULL) {
3070 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3071 error = ENXIO;
3072 goto fail;
3073 }
3074
3075 device_printf(dev,
3076 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
3077 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
3078 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
3079 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
3080
3081 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
3082
3083 /* Try to reset the chip. */
3084 if (bge_reset(sc)) {
3085 device_printf(sc->bge_dev, "chip reset failed\n");
3086 error = ENXIO;
3087 goto fail;
3088 }
3089
3090 sc->bge_asf_mode = 0;
3091 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3092 BGE_SRAM_DATA_SIG_MAGIC)) {
3093 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG)
3094 & BGE_HWCFG_ASF) {
3095 sc->bge_asf_mode |= ASF_ENABLE;
3096 sc->bge_asf_mode |= ASF_STACKUP;
3097 if (BGE_IS_575X_PLUS(sc))
3098 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3099 }
3100 }
3101
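	/*
	 * The first reset above was done blind; now that the ASF state is
	 * known, the firmware can apparently be stopped and signalled
	 * properly before resetting a second time.
	 */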
3102 /* Try to reset the chip again the nice way. */
3103 bge_stop_fw(sc);
3104 bge_sig_pre_reset(sc, BGE_RESET_STOP);
3105 if (bge_reset(sc)) {
3106 device_printf(sc->bge_dev, "chip reset failed\n");
3107 error = ENXIO;
3108 goto fail;
3109 }
3110
3111 bge_sig_legacy(sc, BGE_RESET_STOP);
3112 bge_sig_post_reset(sc, BGE_RESET_STOP);
3113
3114 if (bge_chipinit(sc)) {
3115 device_printf(sc->bge_dev, "chip initialization failed\n");
3116 error = ENXIO;
3117 goto fail;
3118 }
3119
3120 error = bge_get_eaddr(sc, eaddr);
3121 if (error) {
3122 device_printf(sc->bge_dev,
3123 "failed to read station address\n");
3124 error = ENXIO;
3125 goto fail;
3126 }
3127
3128 /* 5705 limits RX return ring to 512 entries. */
3129 if (BGE_IS_5717_PLUS(sc))
3130 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3131 else if (BGE_IS_5705_PLUS(sc))
3132 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3133 else
3134 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3135
3136 if (bge_dma_alloc(sc)) {
3137 device_printf(sc->bge_dev,
3138 "failed to allocate DMA resources\n");
3139 error = ENXIO;
3140 goto fail;
3141 }
3142
3143 bge_add_sysctls(sc);
3144
3145 /* Set default tuneable values. */
3146 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3147 sc->bge_rx_coal_ticks = 150;
3148 sc->bge_tx_coal_ticks = 150;
3149 sc->bge_rx_max_coal_bds = 10;
3150 sc->bge_tx_max_coal_bds = 10;
3151
3152 /* Initialize checksum features to use. */
3153 sc->bge_csum_features = BGE_CSUM_FEATURES;
3154 if (sc->bge_forced_udpcsum != 0)
3155 sc->bge_csum_features |= CSUM_UDP;
3156
3157 /* Set up ifnet structure */
3158 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3159 if (ifp == NULL) {
3160 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3161 error = ENXIO;
3162 goto fail;
3163 }
3164 ifp->if_softc = sc;
3165 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3166 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3167 ifp->if_ioctl = bge_ioctl;
3168 ifp->if_start = bge_start;
3169 ifp->if_init = bge_init;
3170 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3171 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3172 IFQ_SET_READY(&ifp->if_snd);
3173 ifp->if_hwassist = sc->bge_csum_features;
3174 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3175 IFCAP_VLAN_MTU;
3176 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3177 ifp->if_hwassist |= CSUM_TSO;
3178 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3179 }
3180#ifdef IFCAP_VLAN_HWCSUM
3181 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3182#endif
3183 ifp->if_capenable = ifp->if_capabilities;
3184#ifdef DEVICE_POLLING
3185 ifp->if_capabilities |= IFCAP_POLLING;
3186#endif
3187
3188 /*
3189 * 5700 B0 chips do not support checksumming correctly due
3190 * to hardware bugs.
3191 */
3192 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3193 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3194 ifp->if_capenable &= ~IFCAP_HWCSUM;
3195 ifp->if_hwassist = 0;
3196 }
3197
3198 /*
3199 * Figure out what sort of media we have by checking the
3200 * hardware config word in the first 32k of NIC internal memory,
3201 * or fall back to examining the EEPROM if necessary.
3202 * Note: on some BCM5700 cards, this value appears to be unset.
3203 * If that's the case, we have to rely on identifying the NIC
3204 * by its PCI subsystem ID, as we do below for the SysKonnect
3205 * SK-9D41.
3206 */
3207 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3208 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3209 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3210 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3211 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3212 sizeof(hwcfg))) {
3213 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3214 error = ENXIO;
3215 goto fail;
3216 }
3217 hwcfg = ntohl(hwcfg);
3218 }
3219
3220 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3221 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3222 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3223 if (BGE_IS_5714_FAMILY(sc))
3224 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3225 else
3226 sc->bge_flags |= BGE_FLAG_TBI;
3227 }
3228
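	/*
	 * BGE_FLAG_TBI selects the MAC's ten-bit (SerDes/fiber) interface
	 * handling below; copper parts are attached through mii(4) instead.
	 */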
3229 if (sc->bge_flags & BGE_FLAG_TBI) {
3230 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3231 bge_ifmedia_sts);
3232 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3233 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3234 0, NULL);
3235 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3236 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3237 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3238 } else {
3239 /*
3240		 * Do transceiver setup and tell the firmware the
3241		 * driver is down so we can try to get access to the
3242		 * PHY during the probe if ASF is running. Retry a couple
3243		 * of times if we get a conflict with the ASF firmware
3244		 * accessing the PHY.
3245 */
3246 trys = 0;
3247 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3248again:
3249 bge_asf_driver_up(sc);
3250
3251 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3252 bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3253 MIIF_DOPAUSE);
3254 if (error != 0) {
3255 if (trys++ < 4) {
3256 device_printf(sc->bge_dev, "Try again\n");
3257 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3258 BMCR_RESET);
3259 goto again;
3260 }
3261 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3262 goto fail;
3263 }
3264
3265 /*
3266		 * Now tell the firmware we are going up after probing the PHY.
3267 */
3268 if (sc->bge_asf_mode & ASF_STACKUP)
3269 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3270 }
3271
3272 /*
3273 * When using the BCM5701 in PCI-X mode, data corruption has
3274 * been observed in the first few bytes of some received packets.
3275 * Aligning the packet buffer in memory eliminates the corruption.
3276 * Unfortunately, this misaligns the packet payloads. On platforms
3277 * which do not support unaligned accesses, we will realign the
3278 * payloads by copying the received packets.
3279 */
3280 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3281 sc->bge_flags & BGE_FLAG_PCIX)
3282 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3283
3284 /*
3285 * Call MI attach routine.
3286 */
3287 ether_ifattach(ifp, eaddr);
3288 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3289
3290 /* Tell upper layer we support long frames. */
3291 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3292
3293 /*
3294 * Hookup IRQ last.
3295 */
3296 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3297 /* Take advantage of single-shot MSI. */
3298 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3299 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3300 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3301 taskqueue_thread_enqueue, &sc->bge_tq);
3302 if (sc->bge_tq == NULL) {
3303 device_printf(dev, "could not create taskqueue.\n");
3304 ether_ifdetach(ifp);
3305 error = ENXIO;
3306 goto fail;
3307 }
3308 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3309 device_get_nameunit(sc->bge_dev));
3310 error = bus_setup_intr(dev, sc->bge_irq,
3311 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3312 &sc->bge_intrhand);
3313 if (error)
3314 ether_ifdetach(ifp);
3315 } else
3316 error = bus_setup_intr(dev, sc->bge_irq,
3317 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3318 &sc->bge_intrhand);
3319
3320 if (error) {
3321 bge_detach(dev);
3322 device_printf(sc->bge_dev, "couldn't set up irq\n");
3323 }
3324
3325 return (0);
3326
3327fail:
3328 bge_release_resources(sc);
3329
3330 return (error);
3331}
3332
3333static int
3334bge_detach(device_t dev)
3335{
3336 struct bge_softc *sc;
3337 struct ifnet *ifp;
3338
3339 sc = device_get_softc(dev);
3340 ifp = sc->bge_ifp;
3341
3342#ifdef DEVICE_POLLING
3343 if (ifp->if_capenable & IFCAP_POLLING)
3344 ether_poll_deregister(ifp);
3345#endif
3346
3347 BGE_LOCK(sc);
3348 bge_stop(sc);
3349 bge_reset(sc);
3350 BGE_UNLOCK(sc);
3351
3352 callout_drain(&sc->bge_stat_ch);
3353
3354 if (sc->bge_tq)
3355 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3356 ether_ifdetach(ifp);
3357
3358 if (sc->bge_flags & BGE_FLAG_TBI) {
3359 ifmedia_removeall(&sc->bge_ifmedia);
3360 } else {
3361 bus_generic_detach(dev);
3362 device_delete_child(dev, sc->bge_miibus);
3363 }
3364
3365 bge_release_resources(sc);
3366
3367 return (0);
3368}
3369
3370static void
3371bge_release_resources(struct bge_softc *sc)
3372{
3373 device_t dev;
3374
3375 dev = sc->bge_dev;
3376
3377 if (sc->bge_tq != NULL)
3378 taskqueue_free(sc->bge_tq);
3379
3380 if (sc->bge_intrhand != NULL)
3381 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3382
3383 if (sc->bge_irq != NULL)
3384 bus_release_resource(dev, SYS_RES_IRQ,
3385 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3386
3387 if (sc->bge_flags & BGE_FLAG_MSI)
3388 pci_release_msi(dev);
3389
3390 if (sc->bge_res != NULL)
3391 bus_release_resource(dev, SYS_RES_MEMORY,
3392 PCIR_BAR(0), sc->bge_res);
3393
3394 if (sc->bge_ifp != NULL)
3395 if_free(sc->bge_ifp);
3396
3397 bge_dma_free(sc);
3398
3399 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3400 BGE_LOCK_DESTROY(sc);
3401}
3402
3403static int
3404bge_reset(struct bge_softc *sc)
3405{
3406 device_t dev;
3407 uint32_t cachesize, command, pcistate, reset, val;
3408 void (*write_op)(struct bge_softc *, int, int);
3409 uint16_t devctl;
3410 int i;
3411
3412 dev = sc->bge_dev;
3413
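	/*
	 * Pick how the reset gets posted: 575X-plus parts other than the
	 * 5714 family and 5906 take it via memory writes (direct on PCIe,
	 * indirect otherwise); everything else uses register-indirect
	 * access.
	 */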
3414 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3415 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3416 if (sc->bge_flags & BGE_FLAG_PCIE)
3417 write_op = bge_writemem_direct;
3418 else
3419 write_op = bge_writemem_ind;
3420 } else
3421 write_op = bge_writereg_ind;
3422
3423 /* Save some important PCI state. */
3424 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3425 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3426 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3427
3428 pci_write_config(dev, BGE_PCI_MISC_CTL,
3429 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3430 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3431
3432 /* Disable fastboot on controllers that support it. */
3433 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3434 BGE_IS_5755_PLUS(sc)) {
3435 if (bootverbose)
3436 device_printf(dev, "Disabling fastboot\n");
3437 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3438 }
3439
3440 /*
3441 * Write the magic number to SRAM at offset 0xB50.
3442 * When firmware finishes its initialization it will
3443 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
3444 */
3445 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
3446
3447 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3448
3449 /* XXX: Broadcom Linux driver. */
3450 if (sc->bge_flags & BGE_FLAG_PCIE) {
3451 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3452 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3453 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3454 /* Prevent PCIE link training during global reset */
3455 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3456 reset |= 1 << 29;
3457 }
3458 }
3459
3460 /*
3461 * Set GPHY Power Down Override to leave GPHY
3462 * powered up in D0 uninitialized.
3463 */
3464 if (BGE_IS_5705_PLUS(sc) &&
3465 (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
3466 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3467
3468 /* Issue global reset */
3469 write_op(sc, BGE_MISC_CFG, reset);
3470
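	/*
	 * The BCM5906 has an on-chip CPU ("VCPU"): flag a driver-initiated
	 * reset and make sure the CPU is not halted so its firmware can
	 * finish reinitializing; completion is polled for further below.
	 */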
3471 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3472 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3473 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3474 val | BGE_VCPU_STATUS_DRV_RESET);
3475 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3476 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3477 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3478 }
3479
3480 DELAY(1000);
3481
3482 /* XXX: Broadcom Linux driver. */
3483 if (sc->bge_flags & BGE_FLAG_PCIE) {
3484 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3485 DELAY(500000); /* wait for link training to complete */
3486 val = pci_read_config(dev, 0xC4, 4);
3487 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3488 }
3489 devctl = pci_read_config(dev,
3490 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3491 /* Clear enable no snoop and disable relaxed ordering. */
3492 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3493 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3494 /* Set PCIE max payload size to 128. */
3495 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3496 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3497 devctl, 2);
3498 /* Clear error status. */
3499 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3500 PCIM_EXP_STA_CORRECTABLE_ERROR |
3501 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3502 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3503 }
3504
3505 /* Reset some of the PCI state that got zapped by reset. */
3506 pci_write_config(dev, BGE_PCI_MISC_CTL,
3507 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3508 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3509 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3510 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3511 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3512 /*
3513	 * Disable PCI-X relaxed ordering to ensure the status block update
3514	 * comes first and the packet buffer DMA after it. Otherwise the
3515	 * driver may read a stale status block.
3516 */
3517 if (sc->bge_flags & BGE_FLAG_PCIX) {
3518 devctl = pci_read_config(dev,
3519 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3520 devctl &= ~PCIXM_COMMAND_ERO;
3521 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3522 devctl &= ~PCIXM_COMMAND_MAX_READ;
3523 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3524 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3525 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3526 PCIXM_COMMAND_MAX_READ);
3527 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3528 }
3529 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3530 devctl, 2);
3531 }
3532 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3533 if (BGE_IS_5714_FAMILY(sc)) {
3534 /* This chip disables MSI on reset. */
3535 if (sc->bge_flags & BGE_FLAG_MSI) {
3536 val = pci_read_config(dev,
3537 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3538 pci_write_config(dev,
3539 sc->bge_msicap + PCIR_MSI_CTRL,
3540 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3541 val = CSR_READ_4(sc, BGE_MSI_MODE);
3542 CSR_WRITE_4(sc, BGE_MSI_MODE,
3543 val | BGE_MSIMODE_ENABLE);
3544 }
3545 val = CSR_READ_4(sc, BGE_MARB_MODE);
3546 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3547 } else
3548 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3549
3550 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3551 for (i = 0; i < BGE_TIMEOUT; i++) {
3552 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3553 if (val & BGE_VCPU_STATUS_INIT_DONE)
3554 break;
3555 DELAY(100);
3556 }
3557 if (i == BGE_TIMEOUT) {
3558 device_printf(dev, "reset timed out\n");
3559 return (1);
3560 }
3561 } else {
3562 /*
3563 * Poll until we see the 1's complement of the magic number.
3564 * This indicates that the firmware initialization is complete.
3565		 * We expect this to fail if no chip containing the Ethernet
3566		 * address is fitted, though.
3567 */
3568 for (i = 0; i < BGE_TIMEOUT; i++) {
3569 DELAY(10);
3570 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
3571 if (val == ~BGE_SRAM_FW_MB_MAGIC)
3572 break;
3573 }
3574
3575 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3576 device_printf(dev,
3577 "firmware handshake timed out, found 0x%08x\n",
3578 val);
3579 /* BCM57765 A0 needs additional time before accessing. */
3580 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3581 DELAY(10 * 1000); /* XXX */
3582 }
3583
3584 /*
3585 * XXX Wait for the value of the PCISTATE register to
3586 * return to its original pre-reset state. This is a
3587 * fairly good indicator of reset completion. If we don't
3588 * wait for the reset to fully complete, trying to read
3589 * from the device's non-PCI registers may yield garbage
3590 * results.
3591 */
3592 for (i = 0; i < BGE_TIMEOUT; i++) {
3593 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3594 break;
3595 DELAY(10);
3596 }
3597
3598 /* Fix up byte swapping. */
3057 pci_set_max_read_req(dev, 2048);
3058 else if (pci_get_max_read_req(dev) != 4096)
3059 pci_set_max_read_req(dev, 4096);
3060 } else {
3061 /*
3062 * Check if the device is in PCI-X Mode.
3063 * (This bit is not valid on PCI Express controllers.)
3064 */
3065 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
3066 sc->bge_pcixcap = reg;
3067 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3068 BGE_PCISTATE_PCI_BUSMODE) == 0)
3069 sc->bge_flags |= BGE_FLAG_PCIX;
3070 }
3071
3072 /*
3073 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3074 * not actually a MAC controller bug but an issue with the embedded
3075 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3076 */
3077 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3078 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3079 /*
3080 * Allocate the interrupt, using MSI if possible. These devices
3081 * support 8 MSI messages, but only the first one is used in
3082 * normal operation.
3083 */
3084 rid = 0;
3085 if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
3086 sc->bge_msicap = reg;
3087 if (bge_can_use_msi(sc)) {
3088 msicount = pci_msi_count(dev);
3089 if (msicount > 1)
3090 msicount = 1;
3091 } else
3092 msicount = 0;
3093 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
3094 rid = 1;
3095 sc->bge_flags |= BGE_FLAG_MSI;
3096 }
3097 }
3098
3099 /*
3100 * All controllers except the BCM5700 support tagged status, but
3101 * we use tagged status only for the MSI case on the BCM5717.
3102 * Otherwise MSI on the BCM5717 does not work.
3103 */
3104#ifndef DEVICE_POLLING
3105 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3106 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3107#endif
3108
3109 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3110 RF_SHAREABLE | RF_ACTIVE);
3111
3112 if (sc->bge_irq == NULL) {
3113 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3114 error = ENXIO;
3115 goto fail;
3116 }
3117
3118 device_printf(dev,
3119 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
3120 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
3121 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
3122 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
3123
3124 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
3125
3126 /* Try to reset the chip. */
3127 if (bge_reset(sc)) {
3128 device_printf(sc->bge_dev, "chip reset failed\n");
3129 error = ENXIO;
3130 goto fail;
3131 }
3132
3133 sc->bge_asf_mode = 0;
3134 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3135 BGE_SRAM_DATA_SIG_MAGIC)) {
3136 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG)
3137 & BGE_HWCFG_ASF) {
3138 sc->bge_asf_mode |= ASF_ENABLE;
3139 sc->bge_asf_mode |= ASF_STACKUP;
3140 if (BGE_IS_575X_PLUS(sc))
3141 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3142 }
3143 }
3144
3145 /* Try to reset the chip again the nice way. */
3146 bge_stop_fw(sc);
3147 bge_sig_pre_reset(sc, BGE_RESET_STOP);
3148 if (bge_reset(sc)) {
3149 device_printf(sc->bge_dev, "chip reset failed\n");
3150 error = ENXIO;
3151 goto fail;
3152 }
3153
3154 bge_sig_legacy(sc, BGE_RESET_STOP);
3155 bge_sig_post_reset(sc, BGE_RESET_STOP);
3156
3157 if (bge_chipinit(sc)) {
3158 device_printf(sc->bge_dev, "chip initialization failed\n");
3159 error = ENXIO;
3160 goto fail;
3161 }
3162
3163 error = bge_get_eaddr(sc, eaddr);
3164 if (error) {
3165 device_printf(sc->bge_dev,
3166 "failed to read station address\n");
3167 error = ENXIO;
3168 goto fail;
3169 }
3170
3171 /* 5705 limits RX return ring to 512 entries. */
3172 if (BGE_IS_5717_PLUS(sc))
3173 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3174 else if (BGE_IS_5705_PLUS(sc))
3175 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3176 else
3177 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3178
3179 if (bge_dma_alloc(sc)) {
3180 device_printf(sc->bge_dev,
3181 "failed to allocate DMA resources\n");
3182 error = ENXIO;
3183 goto fail;
3184 }
3185
3186 bge_add_sysctls(sc);
3187
3188 /* Set default tuneable values. */
3189 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3190 sc->bge_rx_coal_ticks = 150;
3191 sc->bge_tx_coal_ticks = 150;
3192 sc->bge_rx_max_coal_bds = 10;
3193 sc->bge_tx_max_coal_bds = 10;
3194
3195 /* Initialize checksum features to use. */
3196 sc->bge_csum_features = BGE_CSUM_FEATURES;
3197 if (sc->bge_forced_udpcsum != 0)
3198 sc->bge_csum_features |= CSUM_UDP;
3199
3200 /* Set up ifnet structure */
3201 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3202 if (ifp == NULL) {
3203 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3204 error = ENXIO;
3205 goto fail;
3206 }
3207 ifp->if_softc = sc;
3208 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3209 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3210 ifp->if_ioctl = bge_ioctl;
3211 ifp->if_start = bge_start;
3212 ifp->if_init = bge_init;
3213 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3214 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3215 IFQ_SET_READY(&ifp->if_snd);
3216 ifp->if_hwassist = sc->bge_csum_features;
3217 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3218 IFCAP_VLAN_MTU;
3219 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3220 ifp->if_hwassist |= CSUM_TSO;
3221 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3222 }
3223#ifdef IFCAP_VLAN_HWCSUM
3224 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3225#endif
3226 ifp->if_capenable = ifp->if_capabilities;
3227#ifdef DEVICE_POLLING
3228 ifp->if_capabilities |= IFCAP_POLLING;
3229#endif
3230
3231 /*
3232 * 5700 B0 chips do not support checksumming correctly due
3233 * to hardware bugs.
3234 */
3235 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3236 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3237 ifp->if_capenable &= ~IFCAP_HWCSUM;
3238 ifp->if_hwassist = 0;
3239 }
3240
3241 /*
3242 * Figure out what sort of media we have by checking the
3243 * hardware config word in the first 32k of NIC internal memory,
3244 * or fall back to examining the EEPROM if necessary.
3245 * Note: on some BCM5700 cards, this value appears to be unset.
3246 * If that's the case, we have to rely on identifying the NIC
3247 * by its PCI subsystem ID, as we do below for the SysKonnect
3248 * SK-9D41.
3249 */
3250 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3251 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3252 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3253 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3254 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3255 sizeof(hwcfg))) {
3256 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3257 error = ENXIO;
3258 goto fail;
3259 }
3260 hwcfg = ntohl(hwcfg);
3261 }
3262
3263 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3264 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3265 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3266 if (BGE_IS_5714_FAMILY(sc))
3267 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3268 else
3269 sc->bge_flags |= BGE_FLAG_TBI;
3270 }
3271
3272 if (sc->bge_flags & BGE_FLAG_TBI) {
3273 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3274 bge_ifmedia_sts);
3275 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3276 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3277 0, NULL);
3278 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3279 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3280 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3281 } else {
3282 /*
3283 * Do transceiver setup and tell the firmware the
3284 * driver is down so we can try to get access to
3285 * probe the PHY if ASF is running. Retry a couple of times
3286 * if we get a conflict with the ASF firmware accessing
3287 * the PHY.
3288 */
3289 trys = 0;
3290 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3291again:
3292 bge_asf_driver_up(sc);
3293
3294 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3295 bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3296 MIIF_DOPAUSE);
3297 if (error != 0) {
3298 if (trys++ < 4) {
3299 device_printf(sc->bge_dev, "Try again\n");
3300 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3301 BMCR_RESET);
3302 goto again;
3303 }
3304 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3305 goto fail;
3306 }
3307
3308 /*
3309 * Now tell the firmware we are going up after probing the PHY
3310 */
3311 if (sc->bge_asf_mode & ASF_STACKUP)
3312 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3313 }
3314
3315 /*
3316 * When using the BCM5701 in PCI-X mode, data corruption has
3317 * been observed in the first few bytes of some received packets.
3318 * Aligning the packet buffer in memory eliminates the corruption.
3319 * Unfortunately, this misaligns the packet payloads. On platforms
3320 * which do not support unaligned accesses, we will realign the
3321 * payloads by copying the received packets.
3322 */
3323 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3324 sc->bge_flags & BGE_FLAG_PCIX)
3325 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3326
3327 /*
3328 * Call MI attach routine.
3329 */
3330 ether_ifattach(ifp, eaddr);
3331 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3332
3333 /* Tell upper layer we support long frames. */
3334 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3335
3336 /*
3337 * Hookup IRQ last.
3338 */
3339 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3340 /* Take advantage of single-shot MSI. */
3341 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3342 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3343 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3344 taskqueue_thread_enqueue, &sc->bge_tq);
3345 if (sc->bge_tq == NULL) {
3346 device_printf(dev, "could not create taskqueue.\n");
3347 ether_ifdetach(ifp);
3348 error = ENXIO;
3349 goto fail;
3350 }
3351 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3352 device_get_nameunit(sc->bge_dev));
3353 error = bus_setup_intr(dev, sc->bge_irq,
3354 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3355 &sc->bge_intrhand);
3356 if (error)
3357 ether_ifdetach(ifp);
3358 } else
3359 error = bus_setup_intr(dev, sc->bge_irq,
3360 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3361 &sc->bge_intrhand);
3362
3363 if (error) {
3364 bge_detach(dev);
3365 device_printf(sc->bge_dev, "couldn't set up irq\n");
3366 }
3367
3368 return (0);
3369
3370fail:
3371 bge_release_resources(sc);
3372
3373 return (error);
3374}
3375
3376static int
3377bge_detach(device_t dev)
3378{
3379 struct bge_softc *sc;
3380 struct ifnet *ifp;
3381
3382 sc = device_get_softc(dev);
3383 ifp = sc->bge_ifp;
3384
3385#ifdef DEVICE_POLLING
3386 if (ifp->if_capenable & IFCAP_POLLING)
3387 ether_poll_deregister(ifp);
3388#endif
3389
3390 BGE_LOCK(sc);
3391 bge_stop(sc);
3392 bge_reset(sc);
3393 BGE_UNLOCK(sc);
3394
3395 callout_drain(&sc->bge_stat_ch);
3396
3397 if (sc->bge_tq)
3398 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3399 ether_ifdetach(ifp);
3400
3401 if (sc->bge_flags & BGE_FLAG_TBI) {
3402 ifmedia_removeall(&sc->bge_ifmedia);
3403 } else {
3404 bus_generic_detach(dev);
3405 device_delete_child(dev, sc->bge_miibus);
3406 }
3407
3408 bge_release_resources(sc);
3409
3410 return (0);
3411}
3412
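/*
 * Release the resources acquired in bge_attach().  Every resource is
 * checked before it is freed, so this is also safe to call from the
 * attach failure path with a partially initialized softc.
 */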
3413static void
3414bge_release_resources(struct bge_softc *sc)
3415{
3416 device_t dev;
3417
3418 dev = sc->bge_dev;
3419
3420 if (sc->bge_tq != NULL)
3421 taskqueue_free(sc->bge_tq);
3422
3423 if (sc->bge_intrhand != NULL)
3424 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3425
3426 if (sc->bge_irq != NULL)
3427 bus_release_resource(dev, SYS_RES_IRQ,
3428 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3429
3430 if (sc->bge_flags & BGE_FLAG_MSI)
3431 pci_release_msi(dev);
3432
3433 if (sc->bge_res != NULL)
3434 bus_release_resource(dev, SYS_RES_MEMORY,
3435 PCIR_BAR(0), sc->bge_res);
3436
3437 if (sc->bge_ifp != NULL)
3438 if_free(sc->bge_ifp);
3439
3440 bge_dma_free(sc);
3441
3442 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3443 BGE_LOCK_DESTROY(sc);
3444}
3445
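/*
 * Issue a global reset of the controller, restore the PCI configuration
 * saved around the reset and wait for the bootcode (or, on the BCM5906,
 * the VCPU) to report that it has finished reinitializing.
 */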
3446static int
3447bge_reset(struct bge_softc *sc)
3448{
3449 device_t dev;
3450 uint32_t cachesize, command, pcistate, reset, val;
3451 void (*write_op)(struct bge_softc *, int, int);
3452 uint16_t devctl;
3453 int i;
3454
3455 dev = sc->bge_dev;
3456
3457 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3458 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3459 if (sc->bge_flags & BGE_FLAG_PCIE)
3460 write_op = bge_writemem_direct;
3461 else
3462 write_op = bge_writemem_ind;
3463 } else
3464 write_op = bge_writereg_ind;
3465
3466 /* Save some important PCI state. */
3467 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3468 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3469 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3470
3471 pci_write_config(dev, BGE_PCI_MISC_CTL,
3472 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3473 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3474
3475 /* Disable fastboot on controllers that support it. */
3476 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3477 BGE_IS_5755_PLUS(sc)) {
3478 if (bootverbose)
3479 device_printf(dev, "Disabling fastboot\n");
3480 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3481 }
3482
3483 /*
3484 * Write the magic number to SRAM at offset 0xB50.
3485 * When firmware finishes its initialization it will
3486 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
3487 */
3488 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
3489
3490 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3491
3492 /* XXX: Broadcom Linux driver. */
3493 if (sc->bge_flags & BGE_FLAG_PCIE) {
3494 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3495 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3496 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3497 /* Prevent PCIE link training during global reset */
3498 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3499 reset |= 1 << 29;
3500 }
3501 }
3502
3503 /*
3504 * Set GPHY Power Down Override to leave GPHY
3505 * powered up in D0 uninitialized.
3506 */
3507 if (BGE_IS_5705_PLUS(sc) &&
3508 (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
3509 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3510
3511 /* Issue global reset */
3512 write_op(sc, BGE_MISC_CFG, reset);
3513
3514 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3515 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3516 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3517 val | BGE_VCPU_STATUS_DRV_RESET);
3518 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3519 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3520 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3521 }
3522
3523 DELAY(1000);
3524
3525 /* XXX: Broadcom Linux driver. */
3526 if (sc->bge_flags & BGE_FLAG_PCIE) {
3527 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3528 DELAY(500000); /* wait for link training to complete */
3529 val = pci_read_config(dev, 0xC4, 4);
3530 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3531 }
3532 devctl = pci_read_config(dev,
3533 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3534 /* Clear enable no snoop and disable relaxed ordering. */
3535 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3536 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3537 /* Set PCIE max payload size to 128. */
3538 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3539 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3540 devctl, 2);
3541 /* Clear error status. */
3542 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3543 PCIM_EXP_STA_CORRECTABLE_ERROR |
3544 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3545 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3546 }
3547
3548 /* Reset some of the PCI state that got zapped by reset. */
3549 pci_write_config(dev, BGE_PCI_MISC_CTL,
3550 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3551 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3552 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3553 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3554 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3555 /*
3556 * Disable PCI-X relaxed ordering to ensure status block update
3557 * comes before the packet buffer DMA. Otherwise the driver may
3558 * read a stale status block.
3559 */
3560 if (sc->bge_flags & BGE_FLAG_PCIX) {
3561 devctl = pci_read_config(dev,
3562 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3563 devctl &= ~PCIXM_COMMAND_ERO;
3564 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3565 devctl &= ~PCIXM_COMMAND_MAX_READ;
3566 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3567 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3568 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3569 PCIXM_COMMAND_MAX_READ);
3570 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3571 }
3572 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3573 devctl, 2);
3574 }
3575 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3576 if (BGE_IS_5714_FAMILY(sc)) {
3577 /* This chip disables MSI on reset. */
3578 if (sc->bge_flags & BGE_FLAG_MSI) {
3579 val = pci_read_config(dev,
3580 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3581 pci_write_config(dev,
3582 sc->bge_msicap + PCIR_MSI_CTRL,
3583 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3584 val = CSR_READ_4(sc, BGE_MSI_MODE);
3585 CSR_WRITE_4(sc, BGE_MSI_MODE,
3586 val | BGE_MSIMODE_ENABLE);
3587 }
3588 val = CSR_READ_4(sc, BGE_MARB_MODE);
3589 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3590 } else
3591 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3592
3593 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3594 for (i = 0; i < BGE_TIMEOUT; i++) {
3595 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3596 if (val & BGE_VCPU_STATUS_INIT_DONE)
3597 break;
3598 DELAY(100);
3599 }
3600 if (i == BGE_TIMEOUT) {
3601 device_printf(dev, "reset timed out\n");
3602 return (1);
3603 }
3604 } else {
3605 /*
3606 * Poll until we see the 1's complement of the magic number.
3607 * This indicates that the firmware initialization is complete.
3608 * We expect this to fail if no chip containing the Ethernet
3609 * address is fitted though.
3610 */
3611 for (i = 0; i < BGE_TIMEOUT; i++) {
3612 DELAY(10);
3613 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
3614 if (val == ~BGE_SRAM_FW_MB_MAGIC)
3615 break;
3616 }
3617
3618 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3619 device_printf(dev,
3620 "firmware handshake timed out, found 0x%08x\n",
3621 val);
3622 /* BCM57765 A0 needs additional time before accessing. */
3623 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3624 DELAY(10 * 1000); /* XXX */
3625 }
3626
3627 /*
3628 * XXX Wait for the value of the PCISTATE register to
3629 * return to its original pre-reset state. This is a
3630 * fairly good indicator of reset completion. If we don't
3631 * wait for the reset to fully complete, trying to read
3632 * from the device's non-PCI registers may yield garbage
3633 * results.
3634 */
3635 for (i = 0; i < BGE_TIMEOUT; i++) {
3636 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3637 break;
3638 DELAY(10);
3639 }
3640
3641 /* Fix up byte swapping. */
3599 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3600 BGE_MODECTL_BYTESWAP_DATA);
3642 CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
3643
3644 /* Tell the ASF firmware we are up */
3645 if (sc->bge_asf_mode & ASF_STACKUP)
3646 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3647
3648 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3649
3650 /*
3651 * The 5704 in TBI mode apparently needs some special
3652 * adjustment to ensure the SERDES drive level is set
3653 * to 1.2V.
3654 */
3655 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3656 sc->bge_flags & BGE_FLAG_TBI) {
3657 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3658 val = (val & ~0xFFF) | 0x880;
3659 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3660 }
3661
3662 /* XXX: Broadcom Linux driver. */
3663 if (sc->bge_flags & BGE_FLAG_PCIE &&
3664 !BGE_IS_5717_PLUS(sc) &&
3665 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3666 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3667 /* Enable Data FIFO protection. */
3668 val = CSR_READ_4(sc, 0x7C00);
3669 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3670 }
3671 DELAY(10000);
3672
3673 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
3674 BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
3675 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
3676
3631 return (0);
3632}
3633
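/*
 * Recycle a standard RX buffer descriptor: re-post the existing mbuf at
 * the current producer slot when the frame was bad or a replacement mbuf
 * could not be allocated.  The jumbo variant below does the same for the
 * extended jumbo ring.
 */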
3634static __inline void
3635bge_rxreuse_std(struct bge_softc *sc, int i)
3636{
3637 struct bge_rx_bd *r;
3638
3639 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3640 r->bge_flags = BGE_RXBDFLAG_END;
3641 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3642 r->bge_idx = i;
3643 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3644}
3645
3646static __inline void
3647bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3648{
3649 struct bge_extrx_bd *r;
3650
3651 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3652 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3653 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3654 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3655 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3656 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3657 r->bge_idx = i;
3658 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3659}
3660
3661/*
3662 * Frame reception handling. This is called if there's a frame
3663 * on the receive return list.
3664 *
3665 * Note: we have to be able to handle two possibilities here:
3666 * 1) the frame is from the jumbo receive ring
3667 * 2) the frame is from the standard receive ring
3668 */
3669
3670static int
3671bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3672{
3673 struct ifnet *ifp;
3674 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3675 uint16_t rx_cons;
3676
3677 rx_cons = sc->bge_rx_saved_considx;
3678
3679 /* Nothing to do. */
3680 if (rx_cons == rx_prod)
3681 return (rx_npkts);
3682
3683 ifp = sc->bge_ifp;
3684
3685 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3686 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3687 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3688 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3689 if (BGE_IS_JUMBO_CAPABLE(sc) &&
3690 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3691 (MCLBYTES - ETHER_ALIGN))
3692 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3693 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3694
3695 while (rx_cons != rx_prod) {
3696 struct bge_rx_bd *cur_rx;
3697 uint32_t rxidx;
3698 struct mbuf *m = NULL;
3699 uint16_t vlan_tag = 0;
3700 int have_tag = 0;
3701
3702#ifdef DEVICE_POLLING
3703 if (ifp->if_capenable & IFCAP_POLLING) {
3704 if (sc->rxcycles <= 0)
3705 break;
3706 sc->rxcycles--;
3707 }
3708#endif
3709
3710 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3711
3712 rxidx = cur_rx->bge_idx;
3713 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3714
3715 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3716 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3717 have_tag = 1;
3718 vlan_tag = cur_rx->bge_vlan_tag;
3719 }
3720
3721 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3722 jumbocnt++;
3723 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3724 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3725 bge_rxreuse_jumbo(sc, rxidx);
3726 continue;
3727 }
3728 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3729 bge_rxreuse_jumbo(sc, rxidx);
3730 ifp->if_iqdrops++;
3731 continue;
3732 }
3733 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3734 } else {
3735 stdcnt++;
3736 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3737 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3738 bge_rxreuse_std(sc, rxidx);
3739 continue;
3740 }
3741 if (bge_newbuf_std(sc, rxidx) != 0) {
3742 bge_rxreuse_std(sc, rxidx);
3743 ifp->if_iqdrops++;
3744 continue;
3745 }
3746 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3747 }
3748
3749 ifp->if_ipackets++;
3750#ifndef __NO_STRICT_ALIGNMENT
3751 /*
3752 * For architectures with strict alignment we must make sure
3753 * the payload is aligned.
3754 */
3755 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3756 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3757 cur_rx->bge_len);
3758 m->m_data += ETHER_ALIGN;
3759 }
3760#endif
3761 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3762 m->m_pkthdr.rcvif = ifp;
3763
3764 if (ifp->if_capenable & IFCAP_RXCSUM)
3765 bge_rxcsum(sc, cur_rx, m);
3766
3767 /*
3768 * If we received a packet with a vlan tag,
3769 * attach that information to the packet.
3770 */
3771 if (have_tag) {
3772 m->m_pkthdr.ether_vtag = vlan_tag;
3773 m->m_flags |= M_VLANTAG;
3774 }
3775
3776 if (holdlck != 0) {
3777 BGE_UNLOCK(sc);
3778 (*ifp->if_input)(ifp, m);
3779 BGE_LOCK(sc);
3780 } else
3781 (*ifp->if_input)(ifp, m);
3782 rx_npkts++;
3783
3784 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3785 return (rx_npkts);
3786 }
3787
3788 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3789 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3790 if (stdcnt > 0)
3791 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3792 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3793
3794 if (jumbocnt > 0)
3795 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3796 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3797
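 /*
  * Return the updated return-ring consumer index to the chip and, if any
  * buffers were replenished, advance the standard/jumbo receive producer
  * mailboxes as well.
  */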
3798 sc->bge_rx_saved_considx = rx_cons;
3799 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3800 if (stdcnt)
3801 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3802 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3803 if (jumbocnt)
3804 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3805 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3806#ifdef notyet
3807 /*
3808 * This register wraps very quickly under heavy packet drops.
3809 * If you need correct statistics, you can enable this check.
3810 */
3811 if (BGE_IS_5705_PLUS(sc))
3812 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3813#endif
3814 return (rx_npkts);
3815}
3816
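/*
 * Translate the controller's receive checksum results into mbuf
 * csum_flags.  5717 and newer descriptors carry an explicit IP checksum
 * error bit; older chips expose the raw IP checksum to be verified here.
 */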
3817static void
3818bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3819{
3820
3821 if (BGE_IS_5717_PLUS(sc)) {
3822 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3823 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3824 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3825 if ((cur_rx->bge_error_flag &
3826 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3827 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3828 }
3829 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3830 m->m_pkthdr.csum_data =
3831 cur_rx->bge_tcp_udp_csum;
3832 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3833 CSUM_PSEUDO_HDR;
3834 }
3835 }
3836 } else {
3837 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3838 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3839 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3840 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3841 }
3842 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3843 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3844 m->m_pkthdr.csum_data =
3845 cur_rx->bge_tcp_udp_csum;
3846 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3847 CSUM_PSEUDO_HDR;
3848 }
3849 }
3850}
3851
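/*
 * Transmit completion handling: walk the TX ring from the last processed
 * slot up to the hardware consumer index, unloading the DMA maps and
 * freeing the mbufs of frames the chip has finished sending.
 */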
3852static void
3853bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3854{
3855 struct bge_tx_bd *cur_tx;
3856 struct ifnet *ifp;
3857
3858 BGE_LOCK_ASSERT(sc);
3859
3860 /* Nothing to do. */
3861 if (sc->bge_tx_saved_considx == tx_cons)
3862 return;
3863
3864 ifp = sc->bge_ifp;
3865
3866 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3867 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3868 /*
3869 * Go through our tx ring and free mbufs for those
3870 * frames that have been sent.
3871 */
3872 while (sc->bge_tx_saved_considx != tx_cons) {
3873 uint32_t idx;
3874
3875 idx = sc->bge_tx_saved_considx;
3876 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3877 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3878 ifp->if_opackets++;
3879 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3880 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3881 sc->bge_cdata.bge_tx_dmamap[idx],
3882 BUS_DMASYNC_POSTWRITE);
3883 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3884 sc->bge_cdata.bge_tx_dmamap[idx]);
3885 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3886 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3887 }
3888 sc->bge_txcnt--;
3889 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3890 }
3891
3892 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3893 if (sc->bge_txcnt == 0)
3894 sc->bge_timer = 0;
3895}
3896
3897#ifdef DEVICE_POLLING
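/*
 * polling(4) entry point.  This mirrors the interrupt handler but is
 * driven by the polling framework; the count argument bounds the number
 * of RX packets bge_rxeof() may process per call (via sc->rxcycles).
 */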
3898static int
3899bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3900{
3901 struct bge_softc *sc = ifp->if_softc;
3902 uint16_t rx_prod, tx_cons;
3903 uint32_t statusword;
3904 int rx_npkts = 0;
3905
3906 BGE_LOCK(sc);
3907 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3908 BGE_UNLOCK(sc);
3909 return (rx_npkts);
3910 }
3911
3912 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3913 sc->bge_cdata.bge_status_map,
3914 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3915 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3916 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3917
3918 statusword = sc->bge_ldata.bge_status_block->bge_status;
3919 sc->bge_ldata.bge_status_block->bge_status = 0;
3920
3921 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3922 sc->bge_cdata.bge_status_map,
3923 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3924
3925 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3926 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3927 sc->bge_link_evt++;
3928
3929 if (cmd == POLL_AND_CHECK_STATUS)
3930 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3931 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3932 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3933 bge_link_upd(sc);
3934
3935 sc->rxcycles = count;
3936 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3937 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3938 BGE_UNLOCK(sc);
3939 return (rx_npkts);
3940 }
3941 bge_txeof(sc, tx_cons);
3942 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3943 bge_start_locked(ifp);
3944
3945 BGE_UNLOCK(sc);
3946 return (rx_npkts);
3947}
3948#endif /* DEVICE_POLLING */
3949
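/*
 * Fast interrupt filter used with one-shot MSI; it only schedules
 * bge_intr_task() on the driver's taskqueue.
 */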
3950static int
3951bge_msi_intr(void *arg)
3952{
3953 struct bge_softc *sc;
3954
3955 sc = (struct bge_softc *)arg;
3956 /*
3957 * This interrupt is not shared and the controller has already
3958 * disabled further interrupts.
3959 */
3960 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3961 return (FILTER_HANDLED);
3962}
3963
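/*
 * Taskqueue handler backing bge_msi_intr(): snapshot the status block,
 * process link, RX and TX events, then re-arm the interrupt by writing
 * the status tag back to the IRQ mailbox.
 */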
3964static void
3965bge_intr_task(void *arg, int pending)
3966{
3967 struct bge_softc *sc;
3968 struct ifnet *ifp;
3969 uint32_t status, status_tag;
3970 uint16_t rx_prod, tx_cons;
3971
3972 sc = (struct bge_softc *)arg;
3973 ifp = sc->bge_ifp;
3974
3975 BGE_LOCK(sc);
3976 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3977 BGE_UNLOCK(sc);
3978 return;
3979 }
3980
3981 /* Get updated status block. */
3982 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3983 sc->bge_cdata.bge_status_map,
3984 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3985
3986 /* Save producer/consumer indexes. */
3987 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3988 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3989 status = sc->bge_ldata.bge_status_block->bge_status;
3990 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
3991 sc->bge_ldata.bge_status_block->bge_status = 0;
3992 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3993 sc->bge_cdata.bge_status_map,
3994 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3995 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
3996 status_tag = 0;
3997
3998 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
3999 bge_link_upd(sc);
4000
4001 /* Let controller work. */
4002 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
4003
4004 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4005 sc->bge_rx_saved_considx != rx_prod) {
4006 /* Check RX return ring producer/consumer. */
4007 BGE_UNLOCK(sc);
4008 bge_rxeof(sc, rx_prod, 0);
4009 BGE_LOCK(sc);
4010 }
4011 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4012 /* Check TX ring producer/consumer. */
4013 bge_txeof(sc, tx_cons);
4014 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4015 bge_start_locked(ifp);
4016 }
4017 BGE_UNLOCK(sc);
4018}
4019
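/*
 * Legacy interrupt handler, used whenever the one-shot MSI/taskqueue
 * path set up in bge_attach() is not.
 */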
4020static void
4021bge_intr(void *xsc)
4022{
4023 struct bge_softc *sc;
4024 struct ifnet *ifp;
4025 uint32_t statusword;
4026 uint16_t rx_prod, tx_cons;
4027
4028 sc = xsc;
4029
4030 BGE_LOCK(sc);
4031
4032 ifp = sc->bge_ifp;
4033
4034#ifdef DEVICE_POLLING
4035 if (ifp->if_capenable & IFCAP_POLLING) {
4036 BGE_UNLOCK(sc);
4037 return;
4038 }
4039#endif
4040
4041 /*
4042 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
4043 * disable interrupts by writing nonzero like we used to, since with
4044 * our current organization this just gives complications and
4045 * pessimizations for re-enabling interrupts. We used to have races
4046 * instead of the necessary complications. Disabling interrupts
4047 * would just reduce the chance of a status update while we are
4048 * running (by switching to the interrupt-mode coalescence
4049 * parameters), but this chance is already very low so it is more
4050 * efficient to get another interrupt than prevent it.
4051 *
4052 * We do the ack first to ensure another interrupt if there is a
4053 * status update after the ack. We don't check for the status
4054 * changing later because it is more efficient to get another
4055 * interrupt than prevent it, not quite as above (not checking is
4056 * a smaller optimization than not toggling the interrupt enable,
4057 * since checking doesn't involve PCI accesses and toggling requires
4058 * the status check). So toggling would probably be a pessimization
4059 * even with MSI. It would only be needed for using a task queue.
4060 */
4061 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4062
4063 /*
4064 * Do the mandatory PCI flush as well as get the link status.
4065 */
4066 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4067
4068 /* Make sure the descriptor ring indexes are coherent. */
4069 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4070 sc->bge_cdata.bge_status_map,
4071 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4072 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4073 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4074 sc->bge_ldata.bge_status_block->bge_status = 0;
4075 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4076 sc->bge_cdata.bge_status_map,
4077 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4078
4079 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4080 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4081 statusword || sc->bge_link_evt)
4082 bge_link_upd(sc);
4083
4084 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4085 /* Check RX return ring producer/consumer. */
4086 bge_rxeof(sc, rx_prod, 1);
4087 }
4088
4089 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4090 /* Check TX ring producer/consumer. */
4091 bge_txeof(sc, tx_cons);
4092 }
4093
4094 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4095 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4096 bge_start_locked(ifp);
4097
4098 BGE_UNLOCK(sc);
4099}
4100
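/*
 * Periodic ASF/IPMI keepalive, invoked from bge_tick() and while probing
 * the PHY in bge_attach().
 */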
4101static void
4102bge_asf_driver_up(struct bge_softc *sc)
4103{
4104 if (sc->bge_asf_mode & ASF_STACKUP) {
4105 /* Send ASF heartbeat approx. every 2s */
4106 if (sc->bge_asf_count)
4107 sc->bge_asf_count --;
4108 else {
4109 sc->bge_asf_count = 2;
4110 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4111 BGE_FW_CMD_DRV_ALIVE);
4112 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4113 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4114 BGE_FW_HB_TIMEOUT_SEC);
4115 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4116 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4117 BGE_RX_CPU_DRV_EVENT);
4118 }
4119 }
4120}
4121
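/*
 * Once-per-second callout: update statistics, tick the PHY (or poll TBI
 * link state), send the ASF heartbeat and run the transmit watchdog.
 */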
4122static void
4123bge_tick(void *xsc)
4124{
4125 struct bge_softc *sc = xsc;
4126 struct mii_data *mii = NULL;
4127
4128 BGE_LOCK_ASSERT(sc);
4129
4130 /* Synchronize with possible callout reset/stop. */
4131 if (callout_pending(&sc->bge_stat_ch) ||
4132 !callout_active(&sc->bge_stat_ch))
4133 return;
4134
4135 if (BGE_IS_5705_PLUS(sc))
4136 bge_stats_update_regs(sc);
4137 else
4138 bge_stats_update(sc);
4139
4140 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4141 mii = device_get_softc(sc->bge_miibus);
4142 /*
4143 * Do not touch PHY if we have link up. This could break
4144 * IPMI/ASF mode or produce extra input errors
4145 * (extra errors were reported for bcm5701 & bcm5704).
4146 */
4147 if (!sc->bge_link)
4148 mii_tick(mii);
4149 } else {
4150 /*
4151 * Since in TBI mode auto-polling can't be used we should poll
4152 * link status manually. Here we register a pending link event
4153 * and trigger an interrupt.
4154 */
4155#ifdef DEVICE_POLLING
4156 /* In polling mode we poll link state in bge_poll(). */
4157 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4158#endif
4159 {
4160 sc->bge_link_evt++;
4161 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4162 sc->bge_flags & BGE_FLAG_5788)
4163 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4164 else
4165 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4166 }
4167 }
4168
4169 bge_asf_driver_up(sc);
4170 bge_watchdog(sc);
4171
4172 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4173}
4174
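/*
 * Update statistics from the individual MAC statistics registers, used
 * on 5705 and newer controllers (see bge_tick()).
 */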
4175static void
4176bge_stats_update_regs(struct bge_softc *sc)
4177{
4178 struct ifnet *ifp;
4179 struct bge_mac_stats *stats;
4180
4181 ifp = sc->bge_ifp;
4182 stats = &sc->bge_mac_stats;
4183
4184 stats->ifHCOutOctets +=
4185 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4186 stats->etherStatsCollisions +=
4187 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4188 stats->outXonSent +=
4189 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4190 stats->outXoffSent +=
4191 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4192 stats->dot3StatsInternalMacTransmitErrors +=
4193 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4194 stats->dot3StatsSingleCollisionFrames +=
4195 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4196 stats->dot3StatsMultipleCollisionFrames +=
4197 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4198 stats->dot3StatsDeferredTransmissions +=
4199 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4200 stats->dot3StatsExcessiveCollisions +=
4201 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4202 stats->dot3StatsLateCollisions +=
4203 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4204 stats->ifHCOutUcastPkts +=
4205 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4206 stats->ifHCOutMulticastPkts +=
4207 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4208 stats->ifHCOutBroadcastPkts +=
4209 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4210
4211 stats->ifHCInOctets +=
4212 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4213 stats->etherStatsFragments +=
4214 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4215 stats->ifHCInUcastPkts +=
4216 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4217 stats->ifHCInMulticastPkts +=
4218 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4219 stats->ifHCInBroadcastPkts +=
4220 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4221 stats->dot3StatsFCSErrors +=
4222 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4223 stats->dot3StatsAlignmentErrors +=
4224 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4225 stats->xonPauseFramesReceived +=
4226 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4227 stats->xoffPauseFramesReceived +=
4228 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4229 stats->macControlFramesReceived +=
4230 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4231 stats->xoffStateEntered +=
4232 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4233 stats->dot3StatsFramesTooLong +=
4234 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4235 stats->etherStatsJabbers +=
4236 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4237 stats->etherStatsUndersizePkts +=
4238 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4239
4240 stats->FramesDroppedDueToFilters +=
4241 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4242 stats->DmaWriteQueueFull +=
4243 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4244 stats->DmaWriteHighPriQueueFull +=
4245 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4246 stats->NoMoreRxBDs +=
4247 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4248 stats->InputDiscards +=
4249 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4250 stats->InputErrors +=
4251 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4252 stats->RecvThresholdHit +=
4253 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4254
4255 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4256 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4257 stats->InputErrors);
4258}
4259
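/*
 * Read and discard all of the register-based statistics counters,
 * presumably relying on their clear-on-read behaviour to zero them.
 */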
4260static void
4261bge_stats_clear_regs(struct bge_softc *sc)
4262{
4263
4264 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4265 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4266 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4267 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4268 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4269 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4270 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4271 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4272 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4273 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4274 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4275 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4276 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4277
4278 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4279 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4280 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4281 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4282 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4283 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4284 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4285 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4286 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4287 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4288 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4289 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4290 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4291 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4292
4293 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4294 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4295 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4296 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4297 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4298 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4299 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4300}
4301
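/*
 * Update statistics from the statistics block that older (pre-5705)
 * controllers maintain in NIC memory, read through the memory window.
 */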
4302static void
4303bge_stats_update(struct bge_softc *sc)
4304{
4305 struct ifnet *ifp;
4306 bus_size_t stats;
4307 uint32_t cnt; /* current register value */
4308
4309 ifp = sc->bge_ifp;
4310
4311 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4312
4313#define READ_STAT(sc, stats, stat) \
4314 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4315
4316 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4317 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4318 sc->bge_tx_collisions = cnt;
4319
4320 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4321 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4322 sc->bge_rx_discards = cnt;
4323
4324 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4325 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4326 sc->bge_tx_discards = cnt;
4327
4328#undef READ_STAT
4329}
4330
4331/*
4332 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4333 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4334 * but when such padded frames employ the bge IP/TCP checksum offload,
4335 * the hardware checksum assist gives incorrect results (possibly
4336 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4337 * If we pad such runts with zeros, the onboard checksum comes out correct.
4338 */
4339static __inline int
4340bge_cksum_pad(struct mbuf *m)
4341{
4342 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4343 struct mbuf *last;
4344
4345 /* If there's only the packet-header and we can pad there, use it. */
4346 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4347 M_TRAILINGSPACE(m) >= padlen) {
4348 last = m;
4349 } else {
4350 /*
4351 * Walk packet chain to find last mbuf. We will either
4352 * pad there, or append a new mbuf and pad it.
4353 */
4354 for (last = m; last->m_next != NULL; last = last->m_next);
4355 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4356 /* Allocate new empty mbuf, pad it. Compact later. */
4357 struct mbuf *n;
4358
4359 MGET(n, M_DONTWAIT, MT_DATA);
4360 if (n == NULL)
4361 return (ENOBUFS);
4362 n->m_len = 0;
4363 last->m_next = n;
4364 last = n;
4365 }
4366 }
4367
4368 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4369 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4370 last->m_len += padlen;
4371 m->m_pkthdr.len += padlen;
4372
4373 return (0);
4374}
4375
4376static struct mbuf *
4377bge_check_short_dma(struct mbuf *m)
4378{
4379 struct mbuf *n;
4380 int found;
4381
4382 /*
4383 * If the device receives two back-to-back send BDs with less than
4384 * or equal to 8 total bytes then the device may hang. The two
4385 * back-to-back send BDs must be in the same frame for this failure
4386 * to occur. Scan mbuf chains and see whether two back-to-back
4387 * send BDs are there. If this is the case, allocate new mbuf
4388 * and copy the frame to workaround the silicon bug.
4389 */
4390 for (n = m, found = 0; n != NULL; n = n->m_next) {
4391 if (n->m_len < 8) {
4392 found++;
4393 if (found > 1)
4394 break;
4395 continue;
4396 }
4397 found = 0;
4398 }
4399
4400 if (found > 1) {
4401 n = m_defrag(m, M_DONTWAIT);
4402 if (n == NULL)
4403 m_freem(m);
4404 } else
4405 n = m;
4406 return (n);
4407}
4408
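/*
 * Prepare an mbuf chain for hardware TSO: pull up the Ethernet, IP and
 * TCP headers, clear the IP checksum and TCP pseudo checksum, and encode
 * the MSS and header length for the send descriptors.
 */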
4409static struct mbuf *
4410bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4411 uint16_t *flags)
4412{
4413 struct ip *ip;
4414 struct tcphdr *tcp;
4415 struct mbuf *n;
4416 uint16_t hlen;
4417 uint32_t poff;
4418
4419 if (M_WRITABLE(m) == 0) {
4420 /* Get a writable copy. */
4421 n = m_dup(m, M_DONTWAIT);
4422 m_freem(m);
4423 if (n == NULL)
4424 return (NULL);
4425 m = n;
4426 }
4427 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4428 if (m == NULL)
4429 return (NULL);
4430 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4431 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4432 m = m_pullup(m, poff + sizeof(struct tcphdr));
4433 if (m == NULL)
4434 return (NULL);
4435 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4436 m = m_pullup(m, poff + (tcp->th_off << 2));
4437 if (m == NULL)
4438 return (NULL);
4439 /*
4440 * It seems the controller doesn't modify the IP length and TCP pseudo
4441 * checksum. These checksums computed by the upper stack should be 0.
4442 */
4443 *mss = m->m_pkthdr.tso_segsz;
4444 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4445 ip->ip_sum = 0;
4446 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4447 /* Clear pseudo checksum computed by TCP stack. */
4448 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4449 tcp->th_sum = 0;
4450 /*
4451 * Broadcom controllers use a different descriptor format for
4452 * TSO depending on ASIC revision. Due to the TSO-capable firmware
4453 * license issue and the lower performance of firmware-based TSO,
4454 * we only support hardware-based TSO.
4455 */
4456 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4457 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4458 if (sc->bge_flags & BGE_FLAG_TSO3) {
4459 /*
4460 * For BCM5717 and newer controllers, hardware based TSO
4461 * uses the 14 lower bits of the bge_mss field to store the
4462 * MSS and the upper 2 bits to store the lowest 2 bits of
4463 * the IP/TCP header length. The upper 6 bits of the header
4464 * length are stored in the bge_flags[14:10,4] field. Jumbo
4465 * frames are supported.
4466 */
4467 *mss |= ((hlen & 0x3) << 14);
4468 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
4469 } else {
4470 /*
4471 * For BCM5755 and newer controllers, hardware based TSO uses
4472 * the lower 11 bits to store the MSS and the upper 5 bits to
4473 * store the IP/TCP header length. Jumbo frames are not
4474 * supported.
4475 */
4476 *mss |= (hlen << 11);
4477 }
4478 return (m);
4479}
4480
4481/*
4482 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4483 * pointers to descriptors.
4484 */
4485static int
4486bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4487{
4488 bus_dma_segment_t segs[BGE_NSEG_NEW];
4489 bus_dmamap_t map;
4490 struct bge_tx_bd *d;
4491 struct mbuf *m = *m_head;
4492 uint32_t idx = *txidx;
4493 uint16_t csum_flags, mss, vlan_tag;
4494 int nsegs, i, error;
4495
4496 csum_flags = 0;
4497 mss = 0;
4498 vlan_tag = 0;
4499 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4500 m->m_next != NULL) {
4501 *m_head = bge_check_short_dma(m);
4502 if (*m_head == NULL)
4503 return (ENOBUFS);
4504 m = *m_head;
4505 }
4506 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4507 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4508 if (*m_head == NULL)
4509 return (ENOBUFS);
4510 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4511 BGE_TXBDFLAG_CPU_POST_DMA;
4512 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4513 if (m->m_pkthdr.csum_flags & CSUM_IP)
4514 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4515 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4516 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4517 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4518 (error = bge_cksum_pad(m)) != 0) {
4519 m_freem(m);
4520 *m_head = NULL;
4521 return (error);
4522 }
4523 }
4524 if (m->m_flags & M_LASTFRAG)
4525 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4526 else if (m->m_flags & M_FRAG)
4527 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4528 }
4529
4530 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4531 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4532 m->m_pkthdr.len > ETHER_MAX_LEN)
4533 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4534 if (sc->bge_forced_collapse > 0 &&
4535 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4536 /*
4537 * Forcibly collapse mbuf chains to overcome a hardware
4538 * limitation which only supports a single outstanding
4539 * DMA read operation.
4540 */
4541 if (sc->bge_forced_collapse == 1)
4542 m = m_defrag(m, M_DONTWAIT);
4543 else
4544 m = m_collapse(m, M_DONTWAIT,
4545 sc->bge_forced_collapse);
4546 if (m == NULL)
4547 m = *m_head;
4548 *m_head = m;
4549 }
4550 }
4551
4552 map = sc->bge_cdata.bge_tx_dmamap[idx];
4553 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4554 &nsegs, BUS_DMA_NOWAIT);
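 /* Too many segments: collapse the chain and retry the DMA load once. */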
4555 if (error == EFBIG) {
4556 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4557 if (m == NULL) {
4558 m_freem(*m_head);
4559 *m_head = NULL;
4560 return (ENOBUFS);
4561 }
4562 *m_head = m;
4563 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4564 m, segs, &nsegs, BUS_DMA_NOWAIT);
4565 if (error) {
4566 m_freem(m);
4567 *m_head = NULL;
4568 return (error);
4569 }
4570 } else if (error != 0)
4571 return (error);
4572
4573 /* Check if we have enough free send BDs. */
4574 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4575 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4576 return (ENOBUFS);
4577 }
4578
4579 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4580
4581 if (m->m_flags & M_VLANTAG) {
4582 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4583 vlan_tag = m->m_pkthdr.ether_vtag;
4584 }
4585 for (i = 0; ; i++) {
4586 d = &sc->bge_ldata.bge_tx_ring[idx];
4587 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4588 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4589 d->bge_len = segs[i].ds_len;
4590 d->bge_flags = csum_flags;
4591 d->bge_vlan_tag = vlan_tag;
4592 d->bge_mss = mss;
4593 if (i == nsegs - 1)
4594 break;
4595 BGE_INC(idx, BGE_TX_RING_CNT);
4596 }
4597
4598 /* Mark the last segment as end of packet... */
4599 d->bge_flags |= BGE_TXBDFLAG_END;
4600
4601 /*
4602 * Ensure that the map for this transmission
4603 * is placed at the array index of the last descriptor
4604 * in this chain.
4605 */
4606 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4607 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4608 sc->bge_cdata.bge_tx_chain[idx] = m;
4609 sc->bge_txcnt += nsegs;
4610
4611 BGE_INC(idx, BGE_TX_RING_CNT);
4612 *txidx = idx;
4613
4614 return (0);
4615}
4616
4617/*
4618 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4619 * to the mbuf data regions directly in the transmit descriptors.
4620 */
4621static void
4622bge_start_locked(struct ifnet *ifp)
4623{
4624 struct bge_softc *sc;
4625 struct mbuf *m_head;
4626 uint32_t prodidx;
4627 int count;
4628
4629 sc = ifp->if_softc;
4630 BGE_LOCK_ASSERT(sc);
4631
4632 if (!sc->bge_link ||
4633 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4634 IFF_DRV_RUNNING)
4635 return;
4636
4637 prodidx = sc->bge_tx_prodidx;
4638
4639 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4640 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4641 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4642 break;
4643 }
4644 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4645 if (m_head == NULL)
4646 break;
4647
4648 /*
4649 * XXX
4650 * The code inside the if() block is never reached since we
4651 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4652 * requests to checksum TCP/UDP in a fragmented packet.
4653 *
4654 * XXX
4655 * safety overkill. If this is a fragmented packet chain
4656 * with delayed TCP/UDP checksums, then only encapsulate
4657 * it if we have enough descriptors to handle the entire
4658 * chain at once.
4659 * (paranoia -- may not actually be needed)
4660 */
4661 if (m_head->m_flags & M_FIRSTFRAG &&
4662 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4663 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4664 m_head->m_pkthdr.csum_data + 16) {
4665 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4666 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4667 break;
4668 }
4669 }
4670
4671 /*
4672 * Pack the data into the transmit ring. If we
4673 * don't have room, set the OACTIVE flag and wait
4674 * for the NIC to drain the ring.
4675 */
4676 if (bge_encap(sc, &m_head, &prodidx)) {
4677 if (m_head == NULL)
4678 break;
4679 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4680 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4681 break;
4682 }
4683 ++count;
4684
4685 /*
4686 * If there's a BPF listener, bounce a copy of this frame
4687 * to him.
4688 */
4689#ifdef ETHER_BPF_MTAP
4690 ETHER_BPF_MTAP(ifp, m_head);
4691#else
4692 BPF_MTAP(ifp, m_head);
4693#endif
4694 }
4695
4696 if (count > 0) {
4697 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4698 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4699 /* Transmit. */
4700 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4701 /* 5700 b2 errata */
4702 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4703 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4704
4705 sc->bge_tx_prodidx = prodidx;
4706
4707 /*
4708 * Set a timeout in case the chip goes out to lunch.
4709 */
4710 sc->bge_timer = 5;
4711 }
4712}
4713
4714/*
4715 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4716 * to the mbuf data regions directly in the transmit descriptors.
4717 */
4718static void
4719bge_start(struct ifnet *ifp)
4720{
4721 struct bge_softc *sc;
4722
4723 sc = ifp->if_softc;
4724 BGE_LOCK(sc);
4725 bge_start_locked(ifp);
4726 BGE_UNLOCK(sc);
4727}
4728
4729static void
4730bge_init_locked(struct bge_softc *sc)
4731{
4732 struct ifnet *ifp;
4733 uint16_t *m;
4734 uint32_t mode;
4735
4736 BGE_LOCK_ASSERT(sc);
4737
4738 ifp = sc->bge_ifp;
4739
4740 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4741 return;
4742
4743 /* Cancel pending I/O and flush buffers. */
4744 bge_stop(sc);
4745
4746 bge_stop_fw(sc);
4747 bge_sig_pre_reset(sc, BGE_RESET_START);
4748 bge_reset(sc);
4749 bge_sig_legacy(sc, BGE_RESET_START);
4750 bge_sig_post_reset(sc, BGE_RESET_START);
4751
4752 bge_chipinit(sc);
4753
4754 /*
4755 * Init the various state machines, ring
4756 * control blocks and firmware.
4757 */
4758 if (bge_blockinit(sc)) {
4759 device_printf(sc->bge_dev, "initialization failure\n");
4760 return;
4761 }
4762
4763 ifp = sc->bge_ifp;
4764
4765 /* Specify MTU. */
4766 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4767 ETHER_HDR_LEN + ETHER_CRC_LEN +
4768 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4769
4770 /* Load our MAC address. */
4771 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4772 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4773 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4774
4775 /* Program promiscuous mode. */
4776 bge_setpromisc(sc);
4777
4778 /* Program multicast filter. */
4779 bge_setmulti(sc);
4780
4781 /* Program VLAN tag stripping. */
4782 bge_setvlan(sc);
4783
4784 /* Override UDP checksum offloading. */
4785 if (sc->bge_forced_udpcsum == 0)
4786 sc->bge_csum_features &= ~CSUM_UDP;
4787 else
4788 sc->bge_csum_features |= CSUM_UDP;
4789 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4790 ifp->if_capenable & IFCAP_TXCSUM) {
4791 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4792 ifp->if_hwassist |= sc->bge_csum_features;
4793 }
4794
4795 /* Init RX ring. */
4796 if (bge_init_rx_ring_std(sc) != 0) {
4797 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4798 bge_stop(sc);
4799 return;
4800 }
4801
4802 /*
4803 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4804 * memory to ensure that the chip has in fact read the first
4805 * entry of the ring.
4806 */
4807 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4808 uint32_t v, i;
4809 for (i = 0; i < 10; i++) {
4810 DELAY(20);
4811 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4812 if (v == (MCLBYTES - ETHER_ALIGN))
4813 break;
4814 }
4815 if (i == 10)
4816 device_printf (sc->bge_dev,
4817 "5705 A0 chip failed to load RX ring\n");
4818 }
4819
4820 /* Init jumbo RX ring. */
4821 if (BGE_IS_JUMBO_CAPABLE(sc) &&
4822 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4823 (MCLBYTES - ETHER_ALIGN)) {
4824 if (bge_init_rx_ring_jumbo(sc) != 0) {
4825 device_printf(sc->bge_dev,
4826 "no memory for jumbo Rx buffers.\n");
4827 bge_stop(sc);
4828 return;
4829 }
4830 }
4831
4832 /* Init our RX return ring index. */
4833 sc->bge_rx_saved_considx = 0;
4834
4835 /* Init our RX/TX stat counters. */
4836 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4837
4838 /* Init TX ring. */
4839 bge_init_tx_ring(sc);
4840
4841 /* Enable TX MAC state machine lockup fix. */
4842 mode = CSR_READ_4(sc, BGE_TX_MODE);
4843 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4844 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3677 return (0);
3678}
3679
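/*
 * Re-queue the mbuf already attached to standard RX buffer slot i by
 * writing a fresh descriptor for it at the current standard ring
 * producer position; used when a received frame had an error or a
 * replacement mbuf could not be allocated.  bge_rxreuse_jumbo() below
 * does the same for the jumbo ring.
 */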
3680static __inline void
3681bge_rxreuse_std(struct bge_softc *sc, int i)
3682{
3683 struct bge_rx_bd *r;
3684
3685 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3686 r->bge_flags = BGE_RXBDFLAG_END;
3687 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3688 r->bge_idx = i;
3689 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3690}
3691
3692static __inline void
3693bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3694{
3695 struct bge_extrx_bd *r;
3696
3697 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3698 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3699 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3700 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3701 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3702 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3703 r->bge_idx = i;
3704 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3705}
3706
3707/*
3708 * Frame reception handling. This is called if there's a frame
3709 * on the receive return list.
3710 *
3711 * Note: we have to be able to handle two possibilities here:
3712 * 1) the frame is from the jumbo receive ring
3713 * 2) the frame is from the standard receive ring
3714 */
3715
3716static int
3717bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3718{
3719 struct ifnet *ifp;
3720 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3721 uint16_t rx_cons;
3722
3723 rx_cons = sc->bge_rx_saved_considx;
3724
3725 /* Nothing to do. */
3726 if (rx_cons == rx_prod)
3727 return (rx_npkts);
3728
3729 ifp = sc->bge_ifp;
3730
3731 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3732 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3733 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3734 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3735 if (BGE_IS_JUMBO_CAPABLE(sc) &&
3736 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3737 (MCLBYTES - ETHER_ALIGN))
3738 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3739 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3740
3741 while (rx_cons != rx_prod) {
3742 struct bge_rx_bd *cur_rx;
3743 uint32_t rxidx;
3744 struct mbuf *m = NULL;
3745 uint16_t vlan_tag = 0;
3746 int have_tag = 0;
3747
3748#ifdef DEVICE_POLLING
3749 if (ifp->if_capenable & IFCAP_POLLING) {
3750 if (sc->rxcycles <= 0)
3751 break;
3752 sc->rxcycles--;
3753 }
3754#endif
3755
3756 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3757
3758 rxidx = cur_rx->bge_idx;
3759 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3760
3761 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3762 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3763 have_tag = 1;
3764 vlan_tag = cur_rx->bge_vlan_tag;
3765 }
3766
3767 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3768 jumbocnt++;
3769 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3770 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3771 bge_rxreuse_jumbo(sc, rxidx);
3772 continue;
3773 }
3774 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3775 bge_rxreuse_jumbo(sc, rxidx);
3776 ifp->if_iqdrops++;
3777 continue;
3778 }
3779 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3780 } else {
3781 stdcnt++;
3782 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3783 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3784 bge_rxreuse_std(sc, rxidx);
3785 continue;
3786 }
3787 if (bge_newbuf_std(sc, rxidx) != 0) {
3788 bge_rxreuse_std(sc, rxidx);
3789 ifp->if_iqdrops++;
3790 continue;
3791 }
3792 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3793 }
3794
3795 ifp->if_ipackets++;
3796#ifndef __NO_STRICT_ALIGNMENT
3797 /*
3798 * For architectures with strict alignment we must make sure
3799 * the payload is aligned.
3800 */
3801 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3802 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3803 cur_rx->bge_len);
3804 m->m_data += ETHER_ALIGN;
3805 }
3806#endif
3807 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3808 m->m_pkthdr.rcvif = ifp;
3809
3810 if (ifp->if_capenable & IFCAP_RXCSUM)
3811 bge_rxcsum(sc, cur_rx, m);
3812
3813 /*
3814 * If we received a packet with a vlan tag,
3815 * attach that information to the packet.
3816 */
3817 if (have_tag) {
3818 m->m_pkthdr.ether_vtag = vlan_tag;
3819 m->m_flags |= M_VLANTAG;
3820 }
3821
3822 if (holdlck != 0) {
3823 BGE_UNLOCK(sc);
3824 (*ifp->if_input)(ifp, m);
3825 BGE_LOCK(sc);
3826 } else
3827 (*ifp->if_input)(ifp, m);
3828 rx_npkts++;
3829
3830 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3831 return (rx_npkts);
3832 }
3833
3834 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3835 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3836 if (stdcnt > 0)
3837 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3838 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3839
3840 if (jumbocnt > 0)
3841 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3842 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3843
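/*
 * Tell the chip how far we have processed the return ring and, if any
 * standard or jumbo buffers were replenished above, where the new
 * producer indexes now stand.
 */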
3844 sc->bge_rx_saved_considx = rx_cons;
3845 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3846 if (stdcnt)
3847 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3848 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3849 if (jumbocnt)
3850 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3851 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3852#ifdef notyet
3853 /*
3854 * This register wraps very quickly under heavy packet drops.
3855 * If you need correct statistics, you can enable this check.
3856 */
3857 if (BGE_IS_5705_PLUS(sc))
3858 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3859#endif
3860 return (rx_npkts);
3861}
3862
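/*
 * Translate the RX descriptor's hardware checksum results into mbuf
 * csum_flags.  5717-class chips provide an explicit IP checksum error
 * bit; older chips only supply the raw IP checksum, so validity is
 * inferred from that value.
 */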
3863static void
3864bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3865{
3866
3867 if (BGE_IS_5717_PLUS(sc)) {
3868 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3869 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3870 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3871 if ((cur_rx->bge_error_flag &
3872 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3873 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3874 }
3875 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3876 m->m_pkthdr.csum_data =
3877 cur_rx->bge_tcp_udp_csum;
3878 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3879 CSUM_PSEUDO_HDR;
3880 }
3881 }
3882 } else {
3883 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3884 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3885 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3886 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3887 }
3888 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3889 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3890 m->m_pkthdr.csum_data =
3891 cur_rx->bge_tcp_udp_csum;
3892 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3893 CSUM_PSEUDO_HDR;
3894 }
3895 }
3896}
3897
3898static void
3899bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3900{
3901 struct bge_tx_bd *cur_tx;
3902 struct ifnet *ifp;
3903
3904 BGE_LOCK_ASSERT(sc);
3905
3906 /* Nothing to do. */
3907 if (sc->bge_tx_saved_considx == tx_cons)
3908 return;
3909
3910 ifp = sc->bge_ifp;
3911
3912 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3913 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3914 /*
3915 * Go through our tx ring and free mbufs for those
3916 * frames that have been sent.
3917 */
3918 while (sc->bge_tx_saved_considx != tx_cons) {
3919 uint32_t idx;
3920
3921 idx = sc->bge_tx_saved_considx;
3922 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3923 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3924 ifp->if_opackets++;
3925 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3926 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3927 sc->bge_cdata.bge_tx_dmamap[idx],
3928 BUS_DMASYNC_POSTWRITE);
3929 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3930 sc->bge_cdata.bge_tx_dmamap[idx]);
3931 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3932 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3933 }
3934 sc->bge_txcnt--;
3935 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3936 }
3937
3938 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3939 if (sc->bge_txcnt == 0)
3940 sc->bge_timer = 0;
3941}
3942
3943#ifdef DEVICE_POLLING
3944static int
3945bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3946{
3947 struct bge_softc *sc = ifp->if_softc;
3948 uint16_t rx_prod, tx_cons;
3949 uint32_t statusword;
3950 int rx_npkts = 0;
3951
3952 BGE_LOCK(sc);
3953 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3954 BGE_UNLOCK(sc);
3955 return (rx_npkts);
3956 }
3957
3958 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3959 sc->bge_cdata.bge_status_map,
3960 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3961 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3962 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3963
3964 statusword = sc->bge_ldata.bge_status_block->bge_status;
3965 sc->bge_ldata.bge_status_block->bge_status = 0;
3966
3967 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3968 sc->bge_cdata.bge_status_map,
3969 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3970
3971 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3972 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3973 sc->bge_link_evt++;
3974
3975 if (cmd == POLL_AND_CHECK_STATUS)
3976 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3977 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3978 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3979 bge_link_upd(sc);
3980
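	/* rxcycles caps how many frames bge_rxeof() may process this poll. */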
3981 sc->rxcycles = count;
3982 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3983 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3984 BGE_UNLOCK(sc);
3985 return (rx_npkts);
3986 }
3987 bge_txeof(sc, tx_cons);
3988 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3989 bge_start_locked(ifp);
3990
3991 BGE_UNLOCK(sc);
3992 return (rx_npkts);
3993}
3994#endif /* DEVICE_POLLING */
3995
3996static int
3997bge_msi_intr(void *arg)
3998{
3999 struct bge_softc *sc;
4000
4001 sc = (struct bge_softc *)arg;
4002 /*
4004	 * This interrupt is not shared and the controller has already
4005	 * disabled further interrupts.
4005 */
4006 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
4007 return (FILTER_HANDLED);
4008}
4009
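/*
 * Taskqueue handler that performs the actual interrupt processing for
 * work queued by the MSI filter routine above.
 */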
4010static void
4011bge_intr_task(void *arg, int pending)
4012{
4013 struct bge_softc *sc;
4014 struct ifnet *ifp;
4015 uint32_t status, status_tag;
4016 uint16_t rx_prod, tx_cons;
4017
4018 sc = (struct bge_softc *)arg;
4019 ifp = sc->bge_ifp;
4020
4021 BGE_LOCK(sc);
4022 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4023 BGE_UNLOCK(sc);
4024 return;
4025 }
4026
4027 /* Get updated status block. */
4028 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4029 sc->bge_cdata.bge_status_map,
4030 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4031
4032	 /* Save producer/consumer indexes. */
4033 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4034 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4035 status = sc->bge_ldata.bge_status_block->bge_status;
4036 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
4037 sc->bge_ldata.bge_status_block->bge_status = 0;
4038 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4039 sc->bge_cdata.bge_status_map,
4040 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4041 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
4042 status_tag = 0;
4043
4044 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
4045 bge_link_upd(sc);
4046
4047 /* Let controller work. */
4048 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
4049
4050 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4051 sc->bge_rx_saved_considx != rx_prod) {
4052 /* Check RX return ring producer/consumer. */
4053 BGE_UNLOCK(sc);
4054 bge_rxeof(sc, rx_prod, 0);
4055 BGE_LOCK(sc);
4056 }
4057 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4058 /* Check TX ring producer/consumer. */
4059 bge_txeof(sc, tx_cons);
4060 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4061 bge_start_locked(ifp);
4062 }
4063 BGE_UNLOCK(sc);
4064}
4065
4066static void
4067bge_intr(void *xsc)
4068{
4069 struct bge_softc *sc;
4070 struct ifnet *ifp;
4071 uint32_t statusword;
4072 uint16_t rx_prod, tx_cons;
4073
4074 sc = xsc;
4075
4076 BGE_LOCK(sc);
4077
4078 ifp = sc->bge_ifp;
4079
4080#ifdef DEVICE_POLLING
4081 if (ifp->if_capenable & IFCAP_POLLING) {
4082 BGE_UNLOCK(sc);
4083 return;
4084 }
4085#endif
4086
4087 /*
4088 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
4089 * disable interrupts by writing nonzero like we used to, since with
4090 * our current organization this just gives complications and
4091 * pessimizations for re-enabling interrupts. We used to have races
4092 * instead of the necessary complications. Disabling interrupts
4093 * would just reduce the chance of a status update while we are
4094 * running (by switching to the interrupt-mode coalescence
4095 * parameters), but this chance is already very low so it is more
4096 * efficient to get another interrupt than prevent it.
4097 *
4098 * We do the ack first to ensure another interrupt if there is a
4099 * status update after the ack. We don't check for the status
4100 * changing later because it is more efficient to get another
4101 * interrupt than prevent it, not quite as above (not checking is
4102 * a smaller optimization than not toggling the interrupt enable,
4103	 * since checking doesn't involve PCI accesses and toggling requires
4104 * the status check). So toggling would probably be a pessimization
4105 * even with MSI. It would only be needed for using a task queue.
4106 */
4107 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4108
4109 /*
4110 * Do the mandatory PCI flush as well as get the link status.
4111 */
4112 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4113
4114 /* Make sure the descriptor ring indexes are coherent. */
4115 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4116 sc->bge_cdata.bge_status_map,
4117 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4118 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4119 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4120 sc->bge_ldata.bge_status_block->bge_status = 0;
4121 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4122 sc->bge_cdata.bge_status_map,
4123 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4124
4125 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4126 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4127 statusword || sc->bge_link_evt)
4128 bge_link_upd(sc);
4129
4130 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4131 /* Check RX return ring producer/consumer. */
4132 bge_rxeof(sc, rx_prod, 1);
4133 }
4134
4135 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4136 /* Check TX ring producer/consumer. */
4137 bge_txeof(sc, tx_cons);
4138 }
4139
4140 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4141 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4142 bge_start_locked(ifp);
4143
4144 BGE_UNLOCK(sc);
4145}
4146
4147static void
4148bge_asf_driver_up(struct bge_softc *sc)
4149{
4150 if (sc->bge_asf_mode & ASF_STACKUP) {
4151	 /* Send ASF heartbeat approx. every 2s. */
4152 if (sc->bge_asf_count)
4153	 sc->bge_asf_count--;
4154 else {
4155 sc->bge_asf_count = 2;
4156 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4157 BGE_FW_CMD_DRV_ALIVE);
4158 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4159 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4160 BGE_FW_HB_TIMEOUT_SEC);
4161 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4162 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4163 BGE_RX_CPU_DRV_EVENT);
4164 }
4165 }
4166}
4167
4168static void
4169bge_tick(void *xsc)
4170{
4171 struct bge_softc *sc = xsc;
4172 struct mii_data *mii = NULL;
4173
4174 BGE_LOCK_ASSERT(sc);
4175
4176 /* Synchronize with possible callout reset/stop. */
4177 if (callout_pending(&sc->bge_stat_ch) ||
4178 !callout_active(&sc->bge_stat_ch))
4179 return;
4180
4181 if (BGE_IS_5705_PLUS(sc))
4182 bge_stats_update_regs(sc);
4183 else
4184 bge_stats_update(sc);
4185
4186 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4187 mii = device_get_softc(sc->bge_miibus);
4188 /*
4189	 * Do not touch the PHY if we have link up. This could break
4190	 * IPMI/ASF mode or produce extra input errors
4191	 * (extra errors were reported for bcm5701 & bcm5704).
4192 */
4193 if (!sc->bge_link)
4194 mii_tick(mii);
4195 } else {
4196 /*
4197	 * Since auto-polling can't be used in TBI mode, we have to poll
4198	 * link status manually. Here we register a pending link event
4199	 * and trigger an interrupt.
4200 */
4201#ifdef DEVICE_POLLING
4202 /* In polling mode we poll link state in bge_poll(). */
4203 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4204#endif
4205 {
4206 sc->bge_link_evt++;
4207 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4208 sc->bge_flags & BGE_FLAG_5788)
4209 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4210 else
4211 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4212 }
4213 }
4214
4215 bge_asf_driver_up(sc);
4216 bge_watchdog(sc);
4217
4218 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4219}
4220
4221static void
4222bge_stats_update_regs(struct bge_softc *sc)
4223{
4224 struct ifnet *ifp;
4225 struct bge_mac_stats *stats;
4226
4227 ifp = sc->bge_ifp;
4228 stats = &sc->bge_mac_stats;
4229
4230 stats->ifHCOutOctets +=
4231 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4232 stats->etherStatsCollisions +=
4233 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4234 stats->outXonSent +=
4235 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4236 stats->outXoffSent +=
4237 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4238 stats->dot3StatsInternalMacTransmitErrors +=
4239 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4240 stats->dot3StatsSingleCollisionFrames +=
4241 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4242 stats->dot3StatsMultipleCollisionFrames +=
4243 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4244 stats->dot3StatsDeferredTransmissions +=
4245 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4246 stats->dot3StatsExcessiveCollisions +=
4247 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4248 stats->dot3StatsLateCollisions +=
4249 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4250 stats->ifHCOutUcastPkts +=
4251 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4252 stats->ifHCOutMulticastPkts +=
4253 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4254 stats->ifHCOutBroadcastPkts +=
4255 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4256
4257 stats->ifHCInOctets +=
4258 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4259 stats->etherStatsFragments +=
4260 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4261 stats->ifHCInUcastPkts +=
4262 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4263 stats->ifHCInMulticastPkts +=
4264 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4265 stats->ifHCInBroadcastPkts +=
4266 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4267 stats->dot3StatsFCSErrors +=
4268 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4269 stats->dot3StatsAlignmentErrors +=
4270 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4271 stats->xonPauseFramesReceived +=
4272 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4273 stats->xoffPauseFramesReceived +=
4274 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4275 stats->macControlFramesReceived +=
4276 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4277 stats->xoffStateEntered +=
4278 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4279 stats->dot3StatsFramesTooLong +=
4280 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4281 stats->etherStatsJabbers +=
4282 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4283 stats->etherStatsUndersizePkts +=
4284 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4285
4286 stats->FramesDroppedDueToFilters +=
4287 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4288 stats->DmaWriteQueueFull +=
4289 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4290 stats->DmaWriteHighPriQueueFull +=
4291 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4292 stats->NoMoreRxBDs +=
4293 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4294 stats->InputDiscards +=
4295 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4296 stats->InputErrors +=
4297 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4298 stats->RecvThresholdHit +=
4299 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4300
4301 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4302 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4303 stats->InputErrors);
4304}
4305
4306static void
4307bge_stats_clear_regs(struct bge_softc *sc)
4308{
4309
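	/*
	 * These MAC statistics registers appear to be clear-on-read, so
	 * reading each one once is enough to zero it.
	 */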
4310 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4311 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4312 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4313 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4314 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4315 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4316 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4317 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4318 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4319 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4320 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4321 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4322 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4323
4324 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4325 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4326 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4327 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4328 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4329 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4330 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4331 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4332 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4333 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4334 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4335 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4336 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4337 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4338
4339 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4340 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4341 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4342 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4343 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4344 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4345 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4346}
4347
4348static void
4349bge_stats_update(struct bge_softc *sc)
4350{
4351 struct ifnet *ifp;
4352 bus_size_t stats;
4353 uint32_t cnt; /* current register value */
4354
4355 ifp = sc->bge_ifp;
4356
4357 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4358
4359#define READ_STAT(sc, stats, stat) \
4360 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4361
4362 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4363 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4364 sc->bge_tx_collisions = cnt;
4365
4366 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4367 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4368 sc->bge_rx_discards = cnt;
4369
4370 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4371 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4372 sc->bge_tx_discards = cnt;
4373
4374#undef READ_STAT
4375}
4376
4377/*
4378 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4379 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4380 * but when such padded frames employ the bge IP/TCP checksum offload,
4381 * the hardware checksum assist gives incorrect results (possibly
4382 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4383 * If we pad such runts with zeros, the onboard checksum comes out correct.
4384 */
4385static __inline int
4386bge_cksum_pad(struct mbuf *m)
4387{
4388 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4389 struct mbuf *last;
4390
4391 /* If there's only the packet-header and we can pad there, use it. */
4392 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4393 M_TRAILINGSPACE(m) >= padlen) {
4394 last = m;
4395 } else {
4396 /*
4397 * Walk packet chain to find last mbuf. We will either
4398 * pad there, or append a new mbuf and pad it.
4399 */
4400 for (last = m; last->m_next != NULL; last = last->m_next);
4401 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4402 /* Allocate new empty mbuf, pad it. Compact later. */
4403 struct mbuf *n;
4404
4405 MGET(n, M_DONTWAIT, MT_DATA);
4406 if (n == NULL)
4407 return (ENOBUFS);
4408 n->m_len = 0;
4409 last->m_next = n;
4410 last = n;
4411 }
4412 }
4413
4414 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4415 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4416 last->m_len += padlen;
4417 m->m_pkthdr.len += padlen;
4418
4419 return (0);
4420}
4421
4422static struct mbuf *
4423bge_check_short_dma(struct mbuf *m)
4424{
4425 struct mbuf *n;
4426 int found;
4427
4428 /*
4429	 * If the device receives two back-to-back send BDs with less than
4430	 * or equal to 8 total bytes then the device may hang. The two
4431	 * back-to-back send BDs must be in the same frame for this failure
4432	 * to occur. Scan the mbuf chain and see whether two such back-to-back
4433	 * send BDs are there. If this is the case, allocate a new mbuf
4434	 * and copy the frame to work around the silicon bug.
4435 */
4436 for (n = m, found = 0; n != NULL; n = n->m_next) {
4437 if (n->m_len < 8) {
4438 found++;
4439 if (found > 1)
4440 break;
4441 continue;
4442 }
4443 found = 0;
4444 }
4445
4446 if (found > 1) {
4447 n = m_defrag(m, M_DONTWAIT);
4448 if (n == NULL)
4449 m_freem(m);
4450 } else
4451 n = m;
4452 return (n);
4453}
4454
4455static struct mbuf *
4456bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4457 uint16_t *flags)
4458{
4459 struct ip *ip;
4460 struct tcphdr *tcp;
4461 struct mbuf *n;
4462 uint16_t hlen;
4463 uint32_t poff;
4464
4465 if (M_WRITABLE(m) == 0) {
4466 /* Get a writable copy. */
4467 n = m_dup(m, M_DONTWAIT);
4468 m_freem(m);
4469 if (n == NULL)
4470 return (NULL);
4471 m = n;
4472 }
4473 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4474 if (m == NULL)
4475 return (NULL);
4476 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4477 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4478 m = m_pullup(m, poff + sizeof(struct tcphdr));
4479 if (m == NULL)
4480 return (NULL);
4481 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4482 m = m_pullup(m, poff + (tcp->th_off << 2));
4483 if (m == NULL)
4484 return (NULL);
4485 /*
4486	 * It seems the controller doesn't modify the IP length and TCP pseudo
4487	 * checksum. These checksums computed by the upper stack should be 0.
4488 */
4489 *mss = m->m_pkthdr.tso_segsz;
4490 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4491 ip->ip_sum = 0;
4492 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4493 /* Clear pseudo checksum computed by TCP stack. */
4494 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4495 tcp->th_sum = 0;
4496 /*
4497	 * Broadcom controllers use different descriptor formats for
4498	 * TSO depending on ASIC revision. Due to the TSO-capable firmware
4499	 * license issue and the lower performance of firmware-based TSO,
4500	 * we only support hardware-based TSO.
4501 */
4502 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4503 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4504 if (sc->bge_flags & BGE_FLAG_TSO3) {
4505 /*
4506 * For BCM5717 and newer controllers, hardware based TSO
4507 * uses the 14 lower bits of the bge_mss field to store the
4508 * MSS and the upper 2 bits to store the lowest 2 bits of
4509 * the IP/TCP header length. The upper 6 bits of the header
4510 * length are stored in the bge_flags[14:10,4] field. Jumbo
4511 * frames are supported.
4512 */
4513 *mss |= ((hlen & 0x3) << 14);
4514 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
4515 } else {
4516 /*
4517 * For BCM5755 and newer controllers, hardware based TSO uses
4518 * the lower 11 bits to store the MSS and the upper 5 bits to
4519 * store the IP/TCP header length. Jumbo frames are not
4520 * supported.
4521 */
4522 *mss |= (hlen << 11);
4523 }
4524 return (m);
4525}
4526
4527/*
4528 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4529 * pointers to descriptors.
4530 */
4531static int
4532bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4533{
4534 bus_dma_segment_t segs[BGE_NSEG_NEW];
4535 bus_dmamap_t map;
4536 struct bge_tx_bd *d;
4537 struct mbuf *m = *m_head;
4538 uint32_t idx = *txidx;
4539 uint16_t csum_flags, mss, vlan_tag;
4540 int nsegs, i, error;
4541
4542 csum_flags = 0;
4543 mss = 0;
4544 vlan_tag = 0;
4545 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4546 m->m_next != NULL) {
4547 *m_head = bge_check_short_dma(m);
4548 if (*m_head == NULL)
4549 return (ENOBUFS);
4550 m = *m_head;
4551 }
4552 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4553 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4554 if (*m_head == NULL)
4555 return (ENOBUFS);
4556 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4557 BGE_TXBDFLAG_CPU_POST_DMA;
4558 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4559 if (m->m_pkthdr.csum_flags & CSUM_IP)
4560 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4561 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4562 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4563 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4564 (error = bge_cksum_pad(m)) != 0) {
4565 m_freem(m);
4566 *m_head = NULL;
4567 return (error);
4568 }
4569 }
4570 if (m->m_flags & M_LASTFRAG)
4571 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4572 else if (m->m_flags & M_FRAG)
4573 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4574 }
4575
4576 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4577 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4578 m->m_pkthdr.len > ETHER_MAX_LEN)
4579 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4580 if (sc->bge_forced_collapse > 0 &&
4581 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4582 /*
4583	 * Forcibly collapse mbuf chains to overcome a hardware
4584	 * limitation which only supports a single outstanding
4585	 * DMA read operation.
4586 */
4587 if (sc->bge_forced_collapse == 1)
4588 m = m_defrag(m, M_DONTWAIT);
4589 else
4590 m = m_collapse(m, M_DONTWAIT,
4591 sc->bge_forced_collapse);
4592 if (m == NULL)
4593 m = *m_head;
4594 *m_head = m;
4595 }
4596 }
4597
4598 map = sc->bge_cdata.bge_tx_dmamap[idx];
4599 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4600 &nsegs, BUS_DMA_NOWAIT);
4601 if (error == EFBIG) {
4602 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4603 if (m == NULL) {
4604 m_freem(*m_head);
4605 *m_head = NULL;
4606 return (ENOBUFS);
4607 }
4608 *m_head = m;
4609 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4610 m, segs, &nsegs, BUS_DMA_NOWAIT);
4611 if (error) {
4612 m_freem(m);
4613 *m_head = NULL;
4614 return (error);
4615 }
4616 } else if (error != 0)
4617 return (error);
4618
4619 /* Check if we have enough free send BDs. */
4620 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4621 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4622 return (ENOBUFS);
4623 }
4624
4625 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4626
4627 if (m->m_flags & M_VLANTAG) {
4628 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4629 vlan_tag = m->m_pkthdr.ether_vtag;
4630 }
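	/*
	 * Fill in a send BD for every DMA segment.  Each descriptor carries
	 * the same checksum/VLAN/MSS settings; the END flag is added to the
	 * last one below.
	 */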
4631 for (i = 0; ; i++) {
4632 d = &sc->bge_ldata.bge_tx_ring[idx];
4633 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4634 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4635 d->bge_len = segs[i].ds_len;
4636 d->bge_flags = csum_flags;
4637 d->bge_vlan_tag = vlan_tag;
4638 d->bge_mss = mss;
4639 if (i == nsegs - 1)
4640 break;
4641 BGE_INC(idx, BGE_TX_RING_CNT);
4642 }
4643
4644 /* Mark the last segment as end of packet... */
4645 d->bge_flags |= BGE_TXBDFLAG_END;
4646
4647 /*
4648 * Insure that the map for this transmission
4649 * is placed at the array index of the last descriptor
4650 * in this chain.
4651 */
4652 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4653 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4654 sc->bge_cdata.bge_tx_chain[idx] = m;
4655 sc->bge_txcnt += nsegs;
4656
4657 BGE_INC(idx, BGE_TX_RING_CNT);
4658 *txidx = idx;
4659
4660 return (0);
4661}
4662
4663/*
4664 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4665 * to the mbuf data regions directly in the transmit descriptors.
4666 */
4667static void
4668bge_start_locked(struct ifnet *ifp)
4669{
4670 struct bge_softc *sc;
4671 struct mbuf *m_head;
4672 uint32_t prodidx;
4673 int count;
4674
4675 sc = ifp->if_softc;
4676 BGE_LOCK_ASSERT(sc);
4677
4678 if (!sc->bge_link ||
4679 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4680 IFF_DRV_RUNNING)
4681 return;
4682
4683 prodidx = sc->bge_tx_prodidx;
4684
4685 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4686 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4687 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4688 break;
4689 }
4690 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4691 if (m_head == NULL)
4692 break;
4693
4694 /*
4695 * XXX
4696 * The code inside the if() block is never reached since we
4697 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4698 * requests to checksum TCP/UDP in a fragmented packet.
4699 *
4700 * XXX
4701 * safety overkill. If this is a fragmented packet chain
4702 * with delayed TCP/UDP checksums, then only encapsulate
4703 * it if we have enough descriptors to handle the entire
4704 * chain at once.
4705 * (paranoia -- may not actually be needed)
4706 */
4707 if (m_head->m_flags & M_FIRSTFRAG &&
4708 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4709 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4710 m_head->m_pkthdr.csum_data + 16) {
4711 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4712 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4713 break;
4714 }
4715 }
4716
4717 /*
4718 * Pack the data into the transmit ring. If we
4719 * don't have room, set the OACTIVE flag and wait
4720 * for the NIC to drain the ring.
4721 */
4722 if (bge_encap(sc, &m_head, &prodidx)) {
4723 if (m_head == NULL)
4724 break;
4725 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4726 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4727 break;
4728 }
4729 ++count;
4730
4731 /*
4732 * If there's a BPF listener, bounce a copy of this frame
4733 * to him.
4734 */
4735#ifdef ETHER_BPF_MTAP
4736 ETHER_BPF_MTAP(ifp, m_head);
4737#else
4738 BPF_MTAP(ifp, m_head);
4739#endif
4740 }
4741
4742 if (count > 0) {
4743 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4744 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4745 /* Transmit. */
4746 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4747 /* 5700 b2 errata */
4748 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4749 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4750
4751 sc->bge_tx_prodidx = prodidx;
4752
4753 /*
4754 * Set a timeout in case the chip goes out to lunch.
4755 */
4756 sc->bge_timer = 5;
4757 }
4758}
4759
4760/*
4761 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4762 * to the mbuf data regions directly in the transmit descriptors.
4763 */
4764static void
4765bge_start(struct ifnet *ifp)
4766{
4767 struct bge_softc *sc;
4768
4769 sc = ifp->if_softc;
4770 BGE_LOCK(sc);
4771 bge_start_locked(ifp);
4772 BGE_UNLOCK(sc);
4773}
4774
4775static void
4776bge_init_locked(struct bge_softc *sc)
4777{
4778 struct ifnet *ifp;
4779 uint16_t *m;
4780 uint32_t mode;
4781
4782 BGE_LOCK_ASSERT(sc);
4783
4784 ifp = sc->bge_ifp;
4785
4786 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4787 return;
4788
4789 /* Cancel pending I/O and flush buffers. */
4790 bge_stop(sc);
4791
4792 bge_stop_fw(sc);
4793 bge_sig_pre_reset(sc, BGE_RESET_START);
4794 bge_reset(sc);
4795 bge_sig_legacy(sc, BGE_RESET_START);
4796 bge_sig_post_reset(sc, BGE_RESET_START);
4797
4798 bge_chipinit(sc);
4799
4800 /*
4801 * Init the various state machines, ring
4802 * control blocks and firmware.
4803 */
4804 if (bge_blockinit(sc)) {
4805 device_printf(sc->bge_dev, "initialization failure\n");
4806 return;
4807 }
4808
4809 ifp = sc->bge_ifp;
4810
4811 /* Specify MTU. */
4812 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4813 ETHER_HDR_LEN + ETHER_CRC_LEN +
4814 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4815
4816 /* Load our MAC address. */
4817 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4818 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4819 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4820
4821 /* Program promiscuous mode. */
4822 bge_setpromisc(sc);
4823
4824 /* Program multicast filter. */
4825 bge_setmulti(sc);
4826
4827 /* Program VLAN tag stripping. */
4828 bge_setvlan(sc);
4829
4830 /* Override UDP checksum offloading. */
4831 if (sc->bge_forced_udpcsum == 0)
4832 sc->bge_csum_features &= ~CSUM_UDP;
4833 else
4834 sc->bge_csum_features |= CSUM_UDP;
4835 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4836 ifp->if_capenable & IFCAP_TXCSUM) {
4837 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4838 ifp->if_hwassist |= sc->bge_csum_features;
4839 }
4840
4841 /* Init RX ring. */
4842 if (bge_init_rx_ring_std(sc) != 0) {
4843 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4844 bge_stop(sc);
4845 return;
4846 }
4847
4848 /*
4849 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4850 * memory to insure that the chip has in fact read the first
4851 * entry of the ring.
4852 */
4853 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4854 uint32_t v, i;
4855 for (i = 0; i < 10; i++) {
4856 DELAY(20);
4857 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4858 if (v == (MCLBYTES - ETHER_ALIGN))
4859 break;
4860 }
4861 if (i == 10)
4862 device_printf (sc->bge_dev,
4863 "5705 A0 chip failed to load RX ring\n");
4864 }
4865
4866 /* Init jumbo RX ring. */
4867 if (BGE_IS_JUMBO_CAPABLE(sc) &&
4868 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4869 (MCLBYTES - ETHER_ALIGN)) {
4870 if (bge_init_rx_ring_jumbo(sc) != 0) {
4871 device_printf(sc->bge_dev,
4872 "no memory for jumbo Rx buffers.\n");
4873 bge_stop(sc);
4874 return;
4875 }
4876 }
4877
4878 /* Init our RX return ring index. */
4879 sc->bge_rx_saved_considx = 0;
4880
4881 /* Init our RX/TX stat counters. */
4882 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4883
4884 /* Init TX ring. */
4885 bge_init_tx_ring(sc);
4886
4887 /* Enable TX MAC state machine lockup fix. */
4888 mode = CSR_READ_4(sc, BGE_TX_MODE);
4889 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4890 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
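	/*
	 * For BCM5720, take the jumbo frame length and count down mode bits
	 * from a fresh read of the TX mode register so they are carried over
	 * unchanged.
	 */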
4891 if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
4892 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4893 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
4894 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4895 }
4845 /* Turn on transmitter. */
4846 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4847
4848 /* Turn on receiver. */
4849 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4850
4851 /*
4852 * Set the number of good frames to receive after RX MBUF
4853 * Low Watermark has been reached. After the RX MAC receives
4854 * this number of frames, it will drop subsequent incoming
4855 * frames until the MBUF High Watermark is reached.
4856 */
4857 if (sc->bge_asicrev == BGE_ASICREV_BCM57765)
4858 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
4859 else
4860 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4861
4862 /* Clear MAC statistics. */
4863 if (BGE_IS_5705_PLUS(sc))
4864 bge_stats_clear_regs(sc);
4865
4866 /* Tell firmware we're alive. */
4867 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4868
4869#ifdef DEVICE_POLLING
4870 /* Disable interrupts if we are polling. */
4871 if (ifp->if_capenable & IFCAP_POLLING) {
4872 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4873 BGE_PCIMISCCTL_MASK_PCI_INTR);
4874 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4875 } else
4876#endif
4877
4878 /* Enable host interrupts. */
4879 {
4880 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4881 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4882 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4883 }
4884
4885 bge_ifmedia_upd_locked(ifp);
4886
4887 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4888 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4889
4890 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4891}
4892
4893static void
4894bge_init(void *xsc)
4895{
4896 struct bge_softc *sc = xsc;
4897
4898 BGE_LOCK(sc);
4899 bge_init_locked(sc);
4900 BGE_UNLOCK(sc);
4901}
4902
4903/*
4904 * Set media options.
4905 */
4906static int
4907bge_ifmedia_upd(struct ifnet *ifp)
4908{
4909 struct bge_softc *sc = ifp->if_softc;
4910 int res;
4911
4912 BGE_LOCK(sc);
4913 res = bge_ifmedia_upd_locked(ifp);
4914 BGE_UNLOCK(sc);
4915
4916 return (res);
4917}
4918
4919static int
4920bge_ifmedia_upd_locked(struct ifnet *ifp)
4921{
4922 struct bge_softc *sc = ifp->if_softc;
4923 struct mii_data *mii;
4924 struct mii_softc *miisc;
4925 struct ifmedia *ifm;
4926
4927 BGE_LOCK_ASSERT(sc);
4928
4929 ifm = &sc->bge_ifmedia;
4930
4931 /* If this is a 1000baseX NIC, enable the TBI port. */
4932 if (sc->bge_flags & BGE_FLAG_TBI) {
4933 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4934 return (EINVAL);
4935 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4936 case IFM_AUTO:
4937 /*
4938 * The BCM5704 ASIC appears to have a special
4939 * mechanism for programming the autoneg
4940 * advertisement registers in TBI mode.
4941 */
4942 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4943 uint32_t sgdig;
4944 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4945 if (sgdig & BGE_SGDIGSTS_DONE) {
4946 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4947 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4948 sgdig |= BGE_SGDIGCFG_AUTO |
4949 BGE_SGDIGCFG_PAUSE_CAP |
4950 BGE_SGDIGCFG_ASYM_PAUSE;
4951 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4952 sgdig | BGE_SGDIGCFG_SEND);
4953 DELAY(5);
4954 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4955 }
4956 }
4957 break;
4958 case IFM_1000_SX:
4959 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4960 BGE_CLRBIT(sc, BGE_MAC_MODE,
4961 BGE_MACMODE_HALF_DUPLEX);
4962 } else {
4963 BGE_SETBIT(sc, BGE_MAC_MODE,
4964 BGE_MACMODE_HALF_DUPLEX);
4965 }
4966 break;
4967 default:
4968 return (EINVAL);
4969 }
4970 return (0);
4971 }
4972
4973 sc->bge_link_evt++;
4974 mii = device_get_softc(sc->bge_miibus);
4975 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4976 PHY_RESET(miisc);
4977 mii_mediachg(mii);
4978
4979 /*
4980 * Force an interrupt so that we will call bge_link_upd
4981 * if needed and clear any pending link state attention.
4982	 * Without this we would not get any further interrupts
4983	 * for link state changes and thus would not UP the link and
4984	 * not be able to send in bge_start_locked. The only
4985	 * way to get things working was to receive a packet and
4986	 * get an RX intr.
4987	 * bge_tick should help for fiber cards and we might not
4988	 * need to do this here if BGE_FLAG_TBI is set, but as
4989	 * we poll for fiber anyway it should not harm.
4990 */
4991 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4992 sc->bge_flags & BGE_FLAG_5788)
4993 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4994 else
4995 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4996
4997 return (0);
4998}
4999
5000/*
5001 * Report current media status.
5002 */
5003static void
5004bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5005{
5006 struct bge_softc *sc = ifp->if_softc;
5007 struct mii_data *mii;
5008
5009 BGE_LOCK(sc);
5010
5011 if (sc->bge_flags & BGE_FLAG_TBI) {
5012 ifmr->ifm_status = IFM_AVALID;
5013 ifmr->ifm_active = IFM_ETHER;
5014 if (CSR_READ_4(sc, BGE_MAC_STS) &
5015 BGE_MACSTAT_TBI_PCS_SYNCHED)
5016 ifmr->ifm_status |= IFM_ACTIVE;
5017 else {
5018 ifmr->ifm_active |= IFM_NONE;
5019 BGE_UNLOCK(sc);
5020 return;
5021 }
5022 ifmr->ifm_active |= IFM_1000_SX;
5023 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5024 ifmr->ifm_active |= IFM_HDX;
5025 else
5026 ifmr->ifm_active |= IFM_FDX;
5027 BGE_UNLOCK(sc);
5028 return;
5029 }
5030
5031 mii = device_get_softc(sc->bge_miibus);
5032 mii_pollstat(mii);
5033 ifmr->ifm_active = mii->mii_media_active;
5034 ifmr->ifm_status = mii->mii_media_status;
5035
5036 BGE_UNLOCK(sc);
5037}
5038
5039static int
5040bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5041{
5042 struct bge_softc *sc = ifp->if_softc;
5043 struct ifreq *ifr = (struct ifreq *) data;
5044 struct mii_data *mii;
5045 int flags, mask, error = 0;
5046
5047 switch (command) {
5048 case SIOCSIFMTU:
5049 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5050 (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5051 if (ifr->ifr_mtu < ETHERMIN ||
5052 ifr->ifr_mtu > BGE_JUMBO_MTU) {
5053 error = EINVAL;
5054 break;
5055 }
5056 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5057 error = EINVAL;
5058 break;
5059 }
5060 BGE_LOCK(sc);
5061 if (ifp->if_mtu != ifr->ifr_mtu) {
5062 ifp->if_mtu = ifr->ifr_mtu;
5063 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5064 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5065 bge_init_locked(sc);
5066 }
5067 }
5068 BGE_UNLOCK(sc);
5069 break;
5070 case SIOCSIFFLAGS:
5071 BGE_LOCK(sc);
5072 if (ifp->if_flags & IFF_UP) {
5073 /*
5074 * If only the state of the PROMISC flag changed,
5075 * then just use the 'set promisc mode' command
5076 * instead of reinitializing the entire NIC. Doing
5077 * a full re-init means reloading the firmware and
5078 * waiting for it to start up, which may take a
5079 * second or two. Similarly for ALLMULTI.
5080 */
5081 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5082 flags = ifp->if_flags ^ sc->bge_if_flags;
5083 if (flags & IFF_PROMISC)
5084 bge_setpromisc(sc);
5085 if (flags & IFF_ALLMULTI)
5086 bge_setmulti(sc);
5087 } else
5088 bge_init_locked(sc);
5089 } else {
5090 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5091 bge_stop(sc);
5092 }
5093 }
5094 sc->bge_if_flags = ifp->if_flags;
5095 BGE_UNLOCK(sc);
5096 error = 0;
5097 break;
5098 case SIOCADDMULTI:
5099 case SIOCDELMULTI:
5100 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5101 BGE_LOCK(sc);
5102 bge_setmulti(sc);
5103 BGE_UNLOCK(sc);
5104 error = 0;
5105 }
5106 break;
5107 case SIOCSIFMEDIA:
5108 case SIOCGIFMEDIA:
5109 if (sc->bge_flags & BGE_FLAG_TBI) {
5110 error = ifmedia_ioctl(ifp, ifr,
5111 &sc->bge_ifmedia, command);
5112 } else {
5113 mii = device_get_softc(sc->bge_miibus);
5114 error = ifmedia_ioctl(ifp, ifr,
5115 &mii->mii_media, command);
5116 }
5117 break;
5118 case SIOCSIFCAP:
5119 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5120#ifdef DEVICE_POLLING
5121 if (mask & IFCAP_POLLING) {
5122 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5123 error = ether_poll_register(bge_poll, ifp);
5124 if (error)
5125 return (error);
5126 BGE_LOCK(sc);
5127 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5128 BGE_PCIMISCCTL_MASK_PCI_INTR);
5129 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5130 ifp->if_capenable |= IFCAP_POLLING;
5131 BGE_UNLOCK(sc);
5132 } else {
5133 error = ether_poll_deregister(ifp);
5134 /* Enable interrupt even in error case */
5135 BGE_LOCK(sc);
5136 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5137 BGE_PCIMISCCTL_MASK_PCI_INTR);
5138 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5139 ifp->if_capenable &= ~IFCAP_POLLING;
5140 BGE_UNLOCK(sc);
5141 }
5142 }
5143#endif
5144 if ((mask & IFCAP_TXCSUM) != 0 &&
5145 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5146 ifp->if_capenable ^= IFCAP_TXCSUM;
5147 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5148 ifp->if_hwassist |= sc->bge_csum_features;
5149 else
5150 ifp->if_hwassist &= ~sc->bge_csum_features;
5151 }
5152
5153 if ((mask & IFCAP_RXCSUM) != 0 &&
5154 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5155 ifp->if_capenable ^= IFCAP_RXCSUM;
5156
5157 if ((mask & IFCAP_TSO4) != 0 &&
5158 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5159 ifp->if_capenable ^= IFCAP_TSO4;
5160 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5161 ifp->if_hwassist |= CSUM_TSO;
5162 else
5163 ifp->if_hwassist &= ~CSUM_TSO;
5164 }
5165
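		/*
		 * Toggling IFCAP_VLAN_MTU changes the RX MTU programmed into
		 * the chip, so restart the interface to apply it.
		 */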
5166 if (mask & IFCAP_VLAN_MTU) {
5167 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5168 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5169 bge_init(sc);
5170 }
5171
5172 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5173 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5174 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5175 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5176 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5177 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5178 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5179 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5180 BGE_LOCK(sc);
5181 bge_setvlan(sc);
5182 BGE_UNLOCK(sc);
5183 }
5184#ifdef VLAN_CAPABILITIES
5185 VLAN_CAPABILITIES(ifp);
5186#endif
5187 break;
5188 default:
5189 error = ether_ioctl(ifp, command, data);
5190 break;
5191 }
5192
5193 return (error);
5194}
5195
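/*
 * Called once a second from bge_tick().  If a transmit has been pending
 * longer than the timeout set in bge_start_locked(), reset and
 * reinitialize the chip.
 */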
5196static void
5197bge_watchdog(struct bge_softc *sc)
5198{
5199 struct ifnet *ifp;
5200
5201 BGE_LOCK_ASSERT(sc);
5202
5203 if (sc->bge_timer == 0 || --sc->bge_timer)
5204 return;
5205
5206 ifp = sc->bge_ifp;
5207
5208 if_printf(ifp, "watchdog timeout -- resetting\n");
5209
5210 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5211 bge_init_locked(sc);
5212
5213 ifp->if_oerrors++;
5214}
5215
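/*
 * Clear the enable bit of a state machine block and busy-wait up to
 * BGE_TIMEOUT iterations for the hardware to acknowledge the stop.
 */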
5216static void
5217bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5218{
5219 int i;
5220
5221 BGE_CLRBIT(sc, reg, bit);
5222
5223 for (i = 0; i < BGE_TIMEOUT; i++) {
5224 if ((CSR_READ_4(sc, reg) & bit) == 0)
5225 return;
5226 DELAY(100);
5227 }
5228}
5229
5230/*
5231 * Stop the adapter and free any mbufs allocated to the
5232 * RX and TX lists.
5233 */
5234static void
5235bge_stop(struct bge_softc *sc)
5236{
5237 struct ifnet *ifp;
5238
5239 BGE_LOCK_ASSERT(sc);
5240
5241 ifp = sc->bge_ifp;
5242
5243 callout_stop(&sc->bge_stat_ch);
5244
5245 /* Disable host interrupts. */
5246 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5247 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5248
5249 /*
5250 * Tell firmware we're shutting down.
5251 */
5252 bge_stop_fw(sc);
5253 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5254
5255 /*
5256 * Disable all of the receiver blocks.
5257 */
5258 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5259 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5260 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5261 if (BGE_IS_5700_FAMILY(sc))
5262 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5263 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5264 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5265 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5266
5267 /*
5268 * Disable all of the transmit blocks.
5269 */
5270 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5271 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5272 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5273 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5274 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5275 if (BGE_IS_5700_FAMILY(sc))
5276 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5277 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5278
5279 /*
5280 * Shut down all of the memory managers and related
5281 * state machines.
5282 */
5283 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5284 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5285 if (BGE_IS_5700_FAMILY(sc))
5286 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5287
5288 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5289 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5290 if (!(BGE_IS_5705_PLUS(sc))) {
5291 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5292 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5293 }
5294 /* Update MAC statistics. */
5295 if (BGE_IS_5705_PLUS(sc))
5296 bge_stats_update_regs(sc);
5297
5298 bge_reset(sc);
5299 bge_sig_legacy(sc, BGE_RESET_STOP);
5300 bge_sig_post_reset(sc, BGE_RESET_STOP);
5301
5302 /*
5303 * Keep the ASF firmware running if up.
5304 */
5305 if (sc->bge_asf_mode & ASF_STACKUP)
5306 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5307 else
5308 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5309
5310 /* Free the RX lists. */
5311 bge_free_rx_ring_std(sc);
5312
5313 /* Free jumbo RX list. */
5314 if (BGE_IS_JUMBO_CAPABLE(sc))
5315 bge_free_rx_ring_jumbo(sc);
5316
5317 /* Free TX buffers. */
5318 bge_free_tx_ring(sc);
5319
5320 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5321
5322 /* Clear MAC's link state (PHY may still have link UP). */
5323 if (bootverbose && sc->bge_link)
5324 if_printf(sc->bge_ifp, "link DOWN\n");
5325 sc->bge_link = 0;
5326
5327 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5328}
5329
5330/*
5331 * Stop all chip I/O so that the kernel's probe routines don't
5332 * get confused by errant DMAs when rebooting.
5333 */
5334static int
5335bge_shutdown(device_t dev)
5336{
5337 struct bge_softc *sc;
5338
5339 sc = device_get_softc(dev);
5340 BGE_LOCK(sc);
5341 bge_stop(sc);
5342 bge_reset(sc);
5343 BGE_UNLOCK(sc);
5344
5345 return (0);
5346}
5347
5348static int
5349bge_suspend(device_t dev)
5350{
5351 struct bge_softc *sc;
5352
5353 sc = device_get_softc(dev);
5354 BGE_LOCK(sc);
5355 bge_stop(sc);
5356 BGE_UNLOCK(sc);
5357
5358 return (0);
5359}
5360
5361static int
5362bge_resume(device_t dev)
5363{
5364 struct bge_softc *sc;
5365 struct ifnet *ifp;
5366
5367 sc = device_get_softc(dev);
5368 BGE_LOCK(sc);
5369 ifp = sc->bge_ifp;
5370 if (ifp->if_flags & IFF_UP) {
5371 bge_init_locked(sc);
5372 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5373 bge_start_locked(ifp);
5374 }
5375 BGE_UNLOCK(sc);
5376
5377 return (0);
5378}
5379
5380static void
5381bge_link_upd(struct bge_softc *sc)
5382{
5383 struct mii_data *mii;
5384 uint32_t link, status;
5385
5386 BGE_LOCK_ASSERT(sc);
5387
5388 /* Clear 'pending link event' flag. */
5389 sc->bge_link_evt = 0;
5390
5391 /*
5392 * Process link state changes.
5393 * Grrr. The link status word in the status block does
5394 * not work correctly on the BCM5700 rev AX and BX chips,
5395 * according to all available information. Hence, we have
5396 * to enable MII interrupts in order to properly obtain
5397 * async link changes. Unfortunately, this also means that
5398 * we have to read the MAC status register to detect link
5399 * changes, thereby adding an additional register access to
5400 * the interrupt handler.
5401 *
5402	 * XXX: perhaps the link state detection procedure used for
5403	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5404 */
5405
5406 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5407 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5408 status = CSR_READ_4(sc, BGE_MAC_STS);
5409 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5410 mii = device_get_softc(sc->bge_miibus);
5411 mii_pollstat(mii);
5412 if (!sc->bge_link &&
5413 mii->mii_media_status & IFM_ACTIVE &&
5414 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5415 sc->bge_link++;
5416 if (bootverbose)
5417 if_printf(sc->bge_ifp, "link UP\n");
5418 } else if (sc->bge_link &&
5419 (!(mii->mii_media_status & IFM_ACTIVE) ||
5420 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5421 sc->bge_link = 0;
5422 if (bootverbose)
5423 if_printf(sc->bge_ifp, "link DOWN\n");
5424 }
5425
5426 /* Clear the interrupt. */
5427 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5428 BGE_EVTENB_MI_INTERRUPT);
5429 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5430 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5431 BRGPHY_INTRS);
5432 }
5433 return;
5434 }
5435
5436 if (sc->bge_flags & BGE_FLAG_TBI) {
5437 status = CSR_READ_4(sc, BGE_MAC_STS);
5438 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5439 if (!sc->bge_link) {
5440 sc->bge_link++;
5441 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5442 BGE_CLRBIT(sc, BGE_MAC_MODE,
5443 BGE_MACMODE_TBI_SEND_CFGS);
5444 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5445 if (bootverbose)
5446 if_printf(sc->bge_ifp, "link UP\n");
5447 if_link_state_change(sc->bge_ifp,
5448 LINK_STATE_UP);
5449 }
5450 } else if (sc->bge_link) {
5451 sc->bge_link = 0;
5452 if (bootverbose)
5453 if_printf(sc->bge_ifp, "link DOWN\n");
5454 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5455 }
5456 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5457 /*
5458	 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED bit
5459	 * in the status word always set. Work around this bug by reading
5460	 * the PHY link status directly.
5461 */
5462 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5463
5464 if (link != sc->bge_link ||
5465 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5466 mii = device_get_softc(sc->bge_miibus);
5467 mii_pollstat(mii);
5468 if (!sc->bge_link &&
5469 mii->mii_media_status & IFM_ACTIVE &&
5470 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5471 sc->bge_link++;
5472 if (bootverbose)
5473 if_printf(sc->bge_ifp, "link UP\n");
5474 } else if (sc->bge_link &&
5475 (!(mii->mii_media_status & IFM_ACTIVE) ||
5476 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5477 sc->bge_link = 0;
5478 if (bootverbose)
5479 if_printf(sc->bge_ifp, "link DOWN\n");
5480 }
5481 }
5482 } else {
5483 /*
5484 * For controllers that call mii_tick, we have to poll
5485 * link status.
5486 */
5487 mii = device_get_softc(sc->bge_miibus);
5488 mii_pollstat(mii);
5489 bge_miibus_statchg(sc->bge_dev);
5490 }
5491
5492 /* Clear the attention. */
5493 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5494 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5495 BGE_MACSTAT_LINK_CHANGED);
5496}
5497
5498static void
5499bge_add_sysctls(struct bge_softc *sc)
5500{
5501 struct sysctl_ctx_list *ctx;
5502 struct sysctl_oid_list *children;
5503 char tn[32];
5504 int unit;
5505
5506 ctx = device_get_sysctl_ctx(sc->bge_dev);
5507 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5508
5509#ifdef BGE_REGISTER_DEBUG
5510 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5511 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5512 "Debug Information");
5513
5514 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5515 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5516 "Register Read");
5517
5518 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5519 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5520 "Memory Read");
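	/*
	 * Illustrative usage (hypothetical unit 0), based on the handlers
	 * defined later in this file: writing 1 to debug_info dumps the
	 * status block, a register window and the hardware flags to the
	 * console, while writing an offset below 0x8000 to reg_read or
	 * mem_read prints that register or memory word, e.g.:
	 *
	 *   sysctl dev.bge.0.debug_info=1
	 *   sysctl dev.bge.0.reg_read=0x400	(offset shown is an example only)
	 */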
5521
5522#endif
5523
5524 unit = device_get_unit(sc->bge_dev);
5525 /*
5526 * A common design characteristic for many Broadcom client controllers
5527 * is that they only support a single outstanding DMA read operation
5528 * on the PCIe bus. This means that it will take twice as long to fetch
5529 * a TX frame that is split into header and payload buffers as it does
5530 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5531 * these controllers, coalescing buffers to reduce the number of memory
5532	 * reads is an effective way to get maximum performance (about 940Mbps).
5533	 * Without collapsing TX buffers, the maximum TCP bulk transfer
5534	 * performance is about 850Mbps. However, forced coalescing of mbufs
5535 * consumes a lot of CPU cycles, so leave it off by default.
5536 */
5537 sc->bge_forced_collapse = 0;
5538 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5539 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5540 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5541 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5542 "Number of fragmented TX buffers of a frame allowed before "
5543 "forced collapsing");
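	/*
	 * Illustrative usage (hypothetical unit 0): the value may be set
	 * from /boot/loader.conf via the tunable fetched above, or changed
	 * at runtime through the sysctl added above, e.g.:
	 *
	 *   dev.bge.0.forced_collapse="2"	# loader.conf
	 *   sysctl dev.bge.0.forced_collapse=2	# runtime
	 *
	 * A non-zero value asks the TX path to merge mbuf chains longer
	 * than this many fragments into fewer, larger buffers, roughly
	 * (sketch only, not the literal code used by this driver):
	 *
	 *   m = m_collapse(m, M_DONTWAIT, sc->bge_forced_collapse);
	 */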
5544
5545 /*
5546 * It seems all Broadcom controllers have a bug that can generate UDP
5547 * datagrams with checksum value 0 when TX UDP checksum offloading is
5548	 * enabled. Generating a UDP checksum value of 0 violates RFC 768.
5549	 * Even though the probability of generating such UDP datagrams is
5550	 * low, I don't want to see FreeBSD boxes injecting such datagrams
5551	 * into the network, so disable UDP checksum offloading by default.
5552	 * Users can still override this behavior by setting a sysctl
5553	 * variable, dev.bge.0.forced_udpcsum.
5554 */
5555 sc->bge_forced_udpcsum = 0;
5556 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5557 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5558 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5559 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5560 "Enable UDP checksum offloading even if controller can "
5561 "generate UDP checksum value 0");
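	/*
	 * Illustrative usage (hypothetical unit 0): TX UDP checksum
	 * offloading can be re-enabled despite the bug described above,
	 * either at boot through the tunable fetched above or at runtime
	 * through the sysctl added above, e.g.:
	 *
	 *   dev.bge.0.bge_forced_udpcsum="1"	# loader.conf (tunable name)
	 *   sysctl dev.bge.0.forced_udpcsum=1	# runtime (sysctl name)
	 *
	 * Note that the tunable and sysctl names differ, as set up above.
	 */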
5562
5563 if (BGE_IS_5705_PLUS(sc))
5564 bge_add_sysctl_stats_regs(sc, ctx, children);
5565 else
5566 bge_add_sysctl_stats(sc, ctx, children);
5567}
5568
5569#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5570 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5571 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5572 desc)
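/*
 * For reference, a single invocation such as
 *
 *   BGE_SYSCTL_STAT(sc, ctx, "Input Errors", children, ifInErrors,
 *       "InputErrors");
 *
 * expands to
 *
 *   SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "InputErrors",
 *       CTLTYPE_UINT | CTLFLAG_RD, sc,
 *       offsetof(struct bge_stats, ifInErrors), bge_sysctl_stats, "IU",
 *       "Input Errors");
 *
 * so arg2 of the handler carries the offset of the counter within
 * struct bge_stats.
 */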
5573
5574static void
5575bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5576 struct sysctl_oid_list *parent)
5577{
5578 struct sysctl_oid *tree;
5579 struct sysctl_oid_list *children, *schildren;
5580
5581 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5582 NULL, "BGE Statistics");
5583 schildren = children = SYSCTL_CHILDREN(tree);
5584 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5585 children, COSFramesDroppedDueToFilters,
5586 "FramesDroppedDueToFilters");
5587 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5588 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5589 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5590 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5591 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5592 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5593 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5594 children, ifInDiscards, "InputDiscards");
5595 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5596 children, ifInErrors, "InputErrors");
5597 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5598 children, nicRecvThresholdHit, "RecvThresholdHit");
5599 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5600 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5601 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5602 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5603 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5604 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5605 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5606 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5607 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5608 children, nicRingStatusUpdate, "RingStatusUpdate");
5609 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5610 children, nicInterrupts, "Interrupts");
5611 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5612 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5613 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5614 children, nicSendThresholdHit, "SendThresholdHit");
5615
5616 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5617 NULL, "BGE RX Statistics");
5618 children = SYSCTL_CHILDREN(tree);
5619 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5620 children, rxstats.ifHCInOctets, "ifHCInOctets");
5621 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5622 children, rxstats.etherStatsFragments, "Fragments");
5623 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5624 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5625 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5626 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5627 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5628 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5629 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5630 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5631 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5632 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5633 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5634 children, rxstats.xoffPauseFramesReceived,
5635 "xoffPauseFramesReceived");
5636 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5637 children, rxstats.macControlFramesReceived,
5638 "ControlFramesReceived");
5639 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5640 children, rxstats.xoffStateEntered, "xoffStateEntered");
5641 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5642 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5643 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5644 children, rxstats.etherStatsJabbers, "Jabbers");
5645 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5646 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5647 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5648 children, rxstats.inRangeLengthError, "inRangeLengthError");
5649 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5650 children, rxstats.outRangeLengthError, "outRangeLengthError");
5651
5652 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5653 NULL, "BGE TX Statistics");
5654 children = SYSCTL_CHILDREN(tree);
5655 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5656 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5657 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5658 children, txstats.etherStatsCollisions, "Collisions");
5659 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5660 children, txstats.outXonSent, "XonSent");
5661 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5662 children, txstats.outXoffSent, "XoffSent");
5663 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5664 children, txstats.flowControlDone, "flowControlDone");
5665 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5666 children, txstats.dot3StatsInternalMacTransmitErrors,
5667 "InternalMacTransmitErrors");
5668 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5669 children, txstats.dot3StatsSingleCollisionFrames,
5670 "SingleCollisionFrames");
5671 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5672 children, txstats.dot3StatsMultipleCollisionFrames,
5673 "MultipleCollisionFrames");
5674 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5675 children, txstats.dot3StatsDeferredTransmissions,
5676 "DeferredTransmissions");
5677 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5678 children, txstats.dot3StatsExcessiveCollisions,
5679 "ExcessiveCollisions");
5680 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5681 children, txstats.dot3StatsLateCollisions,
5682 "LateCollisions");
5683 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5684 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5685 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5686 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5687 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5688 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5689 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5690 children, txstats.dot3StatsCarrierSenseErrors,
5691 "CarrierSenseErrors");
5692 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5693 children, txstats.ifOutDiscards, "Discards");
5694 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5695 children, txstats.ifOutErrors, "Errors");
5696}
5697
5698#undef BGE_SYSCTL_STAT
5699
5700#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5701 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
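/*
 * For reference, BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
 * &stats->InputErrors, "Input Errors") simply expands to
 * SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "InputErrors", CTLFLAG_RD,
 * &stats->InputErrors, "Input Errors"), exporting a 64-bit counter
 * accumulated in struct bge_mac_stats (presumably by
 * bge_stats_update_regs(), called elsewhere in this driver).
 */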
5702
5703static void
5704bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5705 struct sysctl_oid_list *parent)
5706{
5707 struct sysctl_oid *tree;
5708 struct sysctl_oid_list *child, *schild;
5709 struct bge_mac_stats *stats;
5710
5711 stats = &sc->bge_mac_stats;
5712 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5713 NULL, "BGE Statistics");
5714 schild = child = SYSCTL_CHILDREN(tree);
5715 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5716 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5717 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5718 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5719 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5720 &stats->DmaWriteHighPriQueueFull,
5721 "NIC DMA Write High Priority Queue Full");
5722 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5723 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5724 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5725 &stats->InputDiscards, "Discarded Input Frames");
5726 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5727 &stats->InputErrors, "Input Errors");
5728 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5729 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5730
5731 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5732 NULL, "BGE RX Statistics");
5733 child = SYSCTL_CHILDREN(tree);
5734 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5735 &stats->ifHCInOctets, "Inbound Octets");
5736 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5737 &stats->etherStatsFragments, "Fragments");
5738 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5739 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5740 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5741 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5742 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5743 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5744 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5745 &stats->dot3StatsFCSErrors, "FCS Errors");
5746 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5747 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5748 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5749 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5750 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5751 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5752 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5753 &stats->macControlFramesReceived, "MAC Control Frames Received");
5754 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5755 &stats->xoffStateEntered, "XOFF State Entered");
5756 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5757 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5758 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5759 &stats->etherStatsJabbers, "Jabbers");
5760 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5761 &stats->etherStatsUndersizePkts, "Undersized Packets");
5762
5763 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5764 NULL, "BGE TX Statistics");
5765 child = SYSCTL_CHILDREN(tree);
5766 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5767 &stats->ifHCOutOctets, "Outbound Octets");
5768 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5769 &stats->etherStatsCollisions, "TX Collisions");
5770 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5771 &stats->outXonSent, "XON Sent");
5772 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5773 &stats->outXoffSent, "XOFF Sent");
5774 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5775 &stats->dot3StatsInternalMacTransmitErrors,
5776 "Internal MAC TX Errors");
5777 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5778 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5779 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5780 &stats->dot3StatsMultipleCollisionFrames,
5781 "Multiple Collision Frames");
5782 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5783 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5784 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5785 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5786 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5787 &stats->dot3StatsLateCollisions, "Late Collisions");
5788 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5789 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5790 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5791 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5792 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5793 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5794}
5795
5796#undef BGE_SYSCTL_STAT_ADD64
5797
5798static int
5799bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5800{
5801 struct bge_softc *sc;
5802 uint32_t result;
5803 int offset;
5804
5805 sc = (struct bge_softc *)arg1;
5806 offset = arg2;
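	/*
	 * Each statistic in the NIC statistics block is apparently laid
	 * out like a bge_hostaddr (hence the offsetof below); only its
	 * low 32 bits are read through the memory window.
	 */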
5807 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5808 offsetof(bge_hostaddr, bge_addr_lo));
5809 return (sysctl_handle_int(oidp, &result, 0, req));
5810}
5811
5812#ifdef BGE_REGISTER_DEBUG
5813static int
5814bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5815{
5816 struct bge_softc *sc;
5817 uint16_t *sbdata;
5818 int error, result, sbsz;
5819 int i, j;
5820
5821 result = -1;
5822 error = sysctl_handle_int(oidp, &result, 0, req);
5823 if (error || (req->newptr == NULL))
5824 return (error);
5825
5826 if (result == 1) {
5827 sc = (struct bge_softc *)arg1;
5828
5829 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5830 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
5831 sbsz = BGE_STATUS_BLK_SZ;
5832 else
5833 sbsz = 32;
5834 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5835 printf("Status Block:\n");
5836 BGE_LOCK(sc);
5837 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
5838 sc->bge_cdata.bge_status_map,
5839 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
5840 for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
5841 printf("%06x:", i);
5842 for (j = 0; j < 8; j++)
5843 printf(" %04x", sbdata[i++]);
5844 printf("\n");
5845 }
5846
5847 printf("Registers:\n");
5848 for (i = 0x800; i < 0xA00; ) {
5849 printf("%06x:", i);
5850 for (j = 0; j < 8; j++) {
5851 printf(" %08x", CSR_READ_4(sc, i));
5852 i += 4;
5853 }
5854 printf("\n");
5855 }
5856 BGE_UNLOCK(sc);
5857
5858 printf("Hardware Flags:\n");
5859 if (BGE_IS_5717_PLUS(sc))
5860 printf(" - 5717 Plus\n");
5861 if (BGE_IS_5755_PLUS(sc))
5862 printf(" - 5755 Plus\n");
5863 if (BGE_IS_575X_PLUS(sc))
5864 printf(" - 575X Plus\n");
5865 if (BGE_IS_5705_PLUS(sc))
5866 printf(" - 5705 Plus\n");
5867 if (BGE_IS_5714_FAMILY(sc))
5868 printf(" - 5714 Family\n");
5869 if (BGE_IS_5700_FAMILY(sc))
5870 printf(" - 5700 Family\n");
5871 if (sc->bge_flags & BGE_FLAG_JUMBO)
5872 printf(" - Supports Jumbo Frames\n");
5873 if (sc->bge_flags & BGE_FLAG_PCIX)
5874 printf(" - PCI-X Bus\n");
5875 if (sc->bge_flags & BGE_FLAG_PCIE)
5876 printf(" - PCI Express Bus\n");
5877 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5878 printf(" - No 3 LEDs\n");
5879 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5880 printf(" - RX Alignment Bug\n");
5881 }
5882
5883 return (error);
5884}
5885
5886static int
5887bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5888{
5889 struct bge_softc *sc;
5890 int error;
5891 uint16_t result;
5892 uint32_t val;
5893
5894 result = -1;
5895 error = sysctl_handle_int(oidp, &result, 0, req);
5896 if (error || (req->newptr == NULL))
5897 return (error);
5898
5899 if (result < 0x8000) {
5900 sc = (struct bge_softc *)arg1;
5901 val = CSR_READ_4(sc, result);
5902 printf("reg 0x%06X = 0x%08X\n", result, val);
5903 }
5904
5905 return (error);
5906}
5907
5908static int
5909bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5910{
5911 struct bge_softc *sc;
5912 int error;
5913 uint16_t result;
5914 uint32_t val;
5915
5916 result = -1;
5917 error = sysctl_handle_int(oidp, &result, 0, req);
5918 if (error || (req->newptr == NULL))
5919 return (error);
5920
5921 if (result < 0x8000) {
5922 sc = (struct bge_softc *)arg1;
5923 val = bge_readmem_ind(sc, result);
5924 printf("mem 0x%06X = 0x%08X\n", result, val);
5925 }
5926
5927 return (error);
5928}
5929#endif
5930
5931static int
5932bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5933{
5934
5935 if (sc->bge_flags & BGE_FLAG_EADDR)
5936 return (1);
5937
5938#ifdef __sparc64__
5939 OF_getetheraddr(sc->bge_dev, ether_addr);
5940 return (0);
5941#endif
5942 return (1);
5943}
5944
5945static int
5946bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5947{
5948 uint32_t mac_addr;
5949
5950 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
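	/*
	 * The upper 16 bits read 0x484b (ASCII "HK") when firmware has
	 * apparently stored a valid MAC address at this SRAM location.
	 */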
5951 if ((mac_addr >> 16) == 0x484b) {
5952 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5953 ether_addr[1] = (uint8_t)mac_addr;
5954 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
5955 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5956 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5957 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5958 ether_addr[5] = (uint8_t)mac_addr;
5959 return (0);
5960 }
5961 return (1);
5962}
5963
5964static int
5965bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5966{
5967 int mac_offset = BGE_EE_MAC_OFFSET;
5968
5969 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5970 mac_offset = BGE_EE_MAC_OFFSET_5906;
5971
5972 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5973 ETHER_ADDR_LEN));
5974}
5975
5976static int
5977bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5978{
5979
5980 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5981 return (1);
5982
5983 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5984 ETHER_ADDR_LEN));
5985}
5986
5987static int
5988bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5989{
5990 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5991 /* NOTE: Order is critical */
5992 bge_get_eaddr_fw,
5993 bge_get_eaddr_mem,
5994 bge_get_eaddr_nvram,
5995 bge_get_eaddr_eeprom,
5996 NULL
5997 };
5998 const bge_eaddr_fcn_t *func;
5999
6000 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6001 if ((*func)(sc, eaddr) == 0)
6002 break;
6003 }
6004 return (*func == NULL ? ENXIO : 0);
6005}
4896 /* Turn on transmitter. */
4897 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4898
4899 /* Turn on receiver. */
4900 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4901
4902 /*
4903	 * Set the number of good frames to receive after the RX MBUF
4904	 * Low Watermark has been reached. After the RX MAC receives
4905 * this number of frames, it will drop subsequent incoming
4906 * frames until the MBUF High Watermark is reached.
4907 */
4908 if (sc->bge_asicrev == BGE_ASICREV_BCM57765)
4909 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
4910 else
4911 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4912
4913 /* Clear MAC statistics. */
4914 if (BGE_IS_5705_PLUS(sc))
4915 bge_stats_clear_regs(sc);
4916
4917 /* Tell firmware we're alive. */
4918 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4919
4920#ifdef DEVICE_POLLING
4921 /* Disable interrupts if we are polling. */
4922 if (ifp->if_capenable & IFCAP_POLLING) {
4923 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4924 BGE_PCIMISCCTL_MASK_PCI_INTR);
4925 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4926 } else
4927#endif
4928
4929 /* Enable host interrupts. */
4930 {
4931 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4932 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4933 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4934 }
4935
4936 bge_ifmedia_upd_locked(ifp);
4937
4938 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4939 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4940
4941 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4942}
4943
4944static void
4945bge_init(void *xsc)
4946{
4947 struct bge_softc *sc = xsc;
4948
4949 BGE_LOCK(sc);
4950 bge_init_locked(sc);
4951 BGE_UNLOCK(sc);
4952}
4953
4954/*
4955 * Set media options.
4956 */
4957static int
4958bge_ifmedia_upd(struct ifnet *ifp)
4959{
4960 struct bge_softc *sc = ifp->if_softc;
4961 int res;
4962
4963 BGE_LOCK(sc);
4964 res = bge_ifmedia_upd_locked(ifp);
4965 BGE_UNLOCK(sc);
4966
4967 return (res);
4968}
4969
4970static int
4971bge_ifmedia_upd_locked(struct ifnet *ifp)
4972{
4973 struct bge_softc *sc = ifp->if_softc;
4974 struct mii_data *mii;
4975 struct mii_softc *miisc;
4976 struct ifmedia *ifm;
4977
4978 BGE_LOCK_ASSERT(sc);
4979
4980 ifm = &sc->bge_ifmedia;
4981
4982 /* If this is a 1000baseX NIC, enable the TBI port. */
4983 if (sc->bge_flags & BGE_FLAG_TBI) {
4984 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4985 return (EINVAL);
4986 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4987 case IFM_AUTO:
4988 /*
4989 * The BCM5704 ASIC appears to have a special
4990 * mechanism for programming the autoneg
4991 * advertisement registers in TBI mode.
4992 */
4993 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4994 uint32_t sgdig;
4995 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4996 if (sgdig & BGE_SGDIGSTS_DONE) {
4997 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4998 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4999 sgdig |= BGE_SGDIGCFG_AUTO |
5000 BGE_SGDIGCFG_PAUSE_CAP |
5001 BGE_SGDIGCFG_ASYM_PAUSE;
5002 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
5003 sgdig | BGE_SGDIGCFG_SEND);
5004 DELAY(5);
5005 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
5006 }
5007 }
5008 break;
5009 case IFM_1000_SX:
5010 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
5011 BGE_CLRBIT(sc, BGE_MAC_MODE,
5012 BGE_MACMODE_HALF_DUPLEX);
5013 } else {
5014 BGE_SETBIT(sc, BGE_MAC_MODE,
5015 BGE_MACMODE_HALF_DUPLEX);
5016 }
5017 break;
5018 default:
5019 return (EINVAL);
5020 }
5021 return (0);
5022 }
5023
5024 sc->bge_link_evt++;
5025 mii = device_get_softc(sc->bge_miibus);
5026 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5027 PHY_RESET(miisc);
5028 mii_mediachg(mii);
5029
5030 /*
5031 * Force an interrupt so that we will call bge_link_upd
5032 * if needed and clear any pending link state attention.
5033	 * Without this we do not get any further interrupts for link
5034	 * state changes, so the link is never brought UP and we are
5035	 * unable to send in bge_start_locked. The only other way to
5036	 * get things working was to receive a packet and take an
5037	 * RX interrupt.
5038	 * bge_tick should help for fiber cards, and we might not need
5039	 * to do this here if BGE_FLAG_TBI is set, but since we poll
5040	 * for fiber anyway it should not hurt.
5041 */
5042 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
5043 sc->bge_flags & BGE_FLAG_5788)
5044 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
5045 else
5046 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
5047
5048 return (0);
5049}
5050
5051/*
5052 * Report current media status.
5053 */
5054static void
5055bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5056{
5057 struct bge_softc *sc = ifp->if_softc;
5058 struct mii_data *mii;
5059
5060 BGE_LOCK(sc);
5061
5062 if (sc->bge_flags & BGE_FLAG_TBI) {
5063 ifmr->ifm_status = IFM_AVALID;
5064 ifmr->ifm_active = IFM_ETHER;
5065 if (CSR_READ_4(sc, BGE_MAC_STS) &
5066 BGE_MACSTAT_TBI_PCS_SYNCHED)
5067 ifmr->ifm_status |= IFM_ACTIVE;
5068 else {
5069 ifmr->ifm_active |= IFM_NONE;
5070 BGE_UNLOCK(sc);
5071 return;
5072 }
5073 ifmr->ifm_active |= IFM_1000_SX;
5074 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5075 ifmr->ifm_active |= IFM_HDX;
5076 else
5077 ifmr->ifm_active |= IFM_FDX;
5078 BGE_UNLOCK(sc);
5079 return;
5080 }
5081
5082 mii = device_get_softc(sc->bge_miibus);
5083 mii_pollstat(mii);
5084 ifmr->ifm_active = mii->mii_media_active;
5085 ifmr->ifm_status = mii->mii_media_status;
5086
5087 BGE_UNLOCK(sc);
5088}
5089
5090static int
5091bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5092{
5093 struct bge_softc *sc = ifp->if_softc;
5094 struct ifreq *ifr = (struct ifreq *) data;
5095 struct mii_data *mii;
5096 int flags, mask, error = 0;
5097
5098 switch (command) {
5099 case SIOCSIFMTU:
5100 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5101 (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5102 if (ifr->ifr_mtu < ETHERMIN ||
5103 ifr->ifr_mtu > BGE_JUMBO_MTU) {
5104 error = EINVAL;
5105 break;
5106 }
5107 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5108 error = EINVAL;
5109 break;
5110 }
5111 BGE_LOCK(sc);
5112 if (ifp->if_mtu != ifr->ifr_mtu) {
5113 ifp->if_mtu = ifr->ifr_mtu;
5114 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5115 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5116 bge_init_locked(sc);
5117 }
5118 }
5119 BGE_UNLOCK(sc);
5120 break;
5121 case SIOCSIFFLAGS:
5122 BGE_LOCK(sc);
5123 if (ifp->if_flags & IFF_UP) {
5124 /*
5125 * If only the state of the PROMISC flag changed,
5126 * then just use the 'set promisc mode' command
5127 * instead of reinitializing the entire NIC. Doing
5128 * a full re-init means reloading the firmware and
5129 * waiting for it to start up, which may take a
5130 * second or two. Similarly for ALLMULTI.
5131 */
5132 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5133 flags = ifp->if_flags ^ sc->bge_if_flags;
5134 if (flags & IFF_PROMISC)
5135 bge_setpromisc(sc);
5136 if (flags & IFF_ALLMULTI)
5137 bge_setmulti(sc);
5138 } else
5139 bge_init_locked(sc);
5140 } else {
5141 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5142 bge_stop(sc);
5143 }
5144 }
5145 sc->bge_if_flags = ifp->if_flags;
5146 BGE_UNLOCK(sc);
5147 error = 0;
5148 break;
5149 case SIOCADDMULTI:
5150 case SIOCDELMULTI:
5151 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5152 BGE_LOCK(sc);
5153 bge_setmulti(sc);
5154 BGE_UNLOCK(sc);
5155 error = 0;
5156 }
5157 break;
5158 case SIOCSIFMEDIA:
5159 case SIOCGIFMEDIA:
5160 if (sc->bge_flags & BGE_FLAG_TBI) {
5161 error = ifmedia_ioctl(ifp, ifr,
5162 &sc->bge_ifmedia, command);
5163 } else {
5164 mii = device_get_softc(sc->bge_miibus);
5165 error = ifmedia_ioctl(ifp, ifr,
5166 &mii->mii_media, command);
5167 }
5168 break;
5169 case SIOCSIFCAP:
5170 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5171#ifdef DEVICE_POLLING
5172 if (mask & IFCAP_POLLING) {
5173 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5174 error = ether_poll_register(bge_poll, ifp);
5175 if (error)
5176 return (error);
5177 BGE_LOCK(sc);
5178 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5179 BGE_PCIMISCCTL_MASK_PCI_INTR);
5180 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5181 ifp->if_capenable |= IFCAP_POLLING;
5182 BGE_UNLOCK(sc);
5183 } else {
5184 error = ether_poll_deregister(ifp);
5185 /* Enable interrupt even in error case */
5186 BGE_LOCK(sc);
5187 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5188 BGE_PCIMISCCTL_MASK_PCI_INTR);
5189 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5190 ifp->if_capenable &= ~IFCAP_POLLING;
5191 BGE_UNLOCK(sc);
5192 }
5193 }
5194#endif
5195 if ((mask & IFCAP_TXCSUM) != 0 &&
5196 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5197 ifp->if_capenable ^= IFCAP_TXCSUM;
5198 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5199 ifp->if_hwassist |= sc->bge_csum_features;
5200 else
5201 ifp->if_hwassist &= ~sc->bge_csum_features;
5202 }
5203
5204 if ((mask & IFCAP_RXCSUM) != 0 &&
5205 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5206 ifp->if_capenable ^= IFCAP_RXCSUM;
5207
5208 if ((mask & IFCAP_TSO4) != 0 &&
5209 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5210 ifp->if_capenable ^= IFCAP_TSO4;
5211 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5212 ifp->if_hwassist |= CSUM_TSO;
5213 else
5214 ifp->if_hwassist &= ~CSUM_TSO;
5215 }
5216
5217 if (mask & IFCAP_VLAN_MTU) {
5218 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5219 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5220 bge_init(sc);
5221 }
5222
5223 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5224 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5225 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5226 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5227 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5228 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5229 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5230 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5231 BGE_LOCK(sc);
5232 bge_setvlan(sc);
5233 BGE_UNLOCK(sc);
5234 }
5235#ifdef VLAN_CAPABILITIES
5236 VLAN_CAPABILITIES(ifp);
5237#endif
5238 break;
5239 default:
5240 error = ether_ioctl(ifp, command, data);
5241 break;
5242 }
5243
5244 return (error);
5245}
5246
5247static void
5248bge_watchdog(struct bge_softc *sc)
5249{
5250 struct ifnet *ifp;
5251
5252 BGE_LOCK_ASSERT(sc);
5253
5254 if (sc->bge_timer == 0 || --sc->bge_timer)
5255 return;
5256
5257 ifp = sc->bge_ifp;
5258
5259 if_printf(ifp, "watchdog timeout -- resetting\n");
5260
5261 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5262 bge_init_locked(sc);
5263
5264 ifp->if_oerrors++;
5265}
5266
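/*
 * Clear an enable bit in the given block's mode register and poll,
 * up to BGE_TIMEOUT times with a 100us delay between reads, for the
 * hardware to report that the block has stopped; gives up silently
 * on timeout.
 */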
5267static void
5268bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5269{
5270 int i;
5271
5272 BGE_CLRBIT(sc, reg, bit);
5273
5274 for (i = 0; i < BGE_TIMEOUT; i++) {
5275 if ((CSR_READ_4(sc, reg) & bit) == 0)
5276 return;
5277 DELAY(100);
5278 }
5279}
5280
5281/*
5282 * Stop the adapter and free any mbufs allocated to the
5283 * RX and TX lists.
5284 */
5285static void
5286bge_stop(struct bge_softc *sc)
5287{
5288 struct ifnet *ifp;
5289
5290 BGE_LOCK_ASSERT(sc);
5291
5292 ifp = sc->bge_ifp;
5293
5294 callout_stop(&sc->bge_stat_ch);
5295
5296 /* Disable host interrupts. */
5297 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5298 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5299
5300 /*
5301 * Tell firmware we're shutting down.
5302 */
5303 bge_stop_fw(sc);
5304 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5305
5306 /*
5307 * Disable all of the receiver blocks.
5308 */
5309 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5310 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5311 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5312 if (BGE_IS_5700_FAMILY(sc))
5313 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5314 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5315 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5316 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5317
5318 /*
5319 * Disable all of the transmit blocks.
5320 */
5321 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5322 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5323 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5324 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5325 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5326 if (BGE_IS_5700_FAMILY(sc))
5327 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5328 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5329
5330 /*
5331 * Shut down all of the memory managers and related
5332 * state machines.
5333 */
5334 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5335 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5336 if (BGE_IS_5700_FAMILY(sc))
5337 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5338
5339 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5340 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5341 if (!(BGE_IS_5705_PLUS(sc))) {
5342 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5343 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5344 }
5345 /* Update MAC statistics. */
5346 if (BGE_IS_5705_PLUS(sc))
5347 bge_stats_update_regs(sc);
5348
5349 bge_reset(sc);
5350 bge_sig_legacy(sc, BGE_RESET_STOP);
5351 bge_sig_post_reset(sc, BGE_RESET_STOP);
5352
5353 /*
5354 * Keep the ASF firmware running if up.
5355 */
5356 if (sc->bge_asf_mode & ASF_STACKUP)
5357 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5358 else
5359 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5360
5361 /* Free the RX lists. */
5362 bge_free_rx_ring_std(sc);
5363
5364 /* Free jumbo RX list. */
5365 if (BGE_IS_JUMBO_CAPABLE(sc))
5366 bge_free_rx_ring_jumbo(sc);
5367
5368 /* Free TX buffers. */
5369 bge_free_tx_ring(sc);
5370
5371 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5372
5373 /* Clear MAC's link state (PHY may still have link UP). */
5374 if (bootverbose && sc->bge_link)
5375 if_printf(sc->bge_ifp, "link DOWN\n");
5376 sc->bge_link = 0;
5377
5378 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5379}
5380
5381/*
5382 * Stop all chip I/O so that the kernel's probe routines don't
5383 * get confused by errant DMAs when rebooting.
5384 */
5385static int
5386bge_shutdown(device_t dev)
5387{
5388 struct bge_softc *sc;
5389
5390 sc = device_get_softc(dev);
5391 BGE_LOCK(sc);
5392 bge_stop(sc);
5393 bge_reset(sc);
5394 BGE_UNLOCK(sc);
5395
5396 return (0);
5397}
5398
5399static int
5400bge_suspend(device_t dev)
5401{
5402 struct bge_softc *sc;
5403
5404 sc = device_get_softc(dev);
5405 BGE_LOCK(sc);
5406 bge_stop(sc);
5407 BGE_UNLOCK(sc);
5408
5409 return (0);
5410}
5411
5412static int
5413bge_resume(device_t dev)
5414{
5415 struct bge_softc *sc;
5416 struct ifnet *ifp;
5417
5418 sc = device_get_softc(dev);
5419 BGE_LOCK(sc);
5420 ifp = sc->bge_ifp;
5421 if (ifp->if_flags & IFF_UP) {
5422 bge_init_locked(sc);
5423 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5424 bge_start_locked(ifp);
5425 }
5426 BGE_UNLOCK(sc);
5427
5428 return (0);
5429}
5430
5431static void
5432bge_link_upd(struct bge_softc *sc)
5433{
5434 struct mii_data *mii;
5435 uint32_t link, status;
5436
5437 BGE_LOCK_ASSERT(sc);
5438
5439 /* Clear 'pending link event' flag. */
5440 sc->bge_link_evt = 0;
5441
5442 /*
5443 * Process link state changes.
5444 * Grrr. The link status word in the status block does
5445 * not work correctly on the BCM5700 rev AX and BX chips,
5446 * according to all available information. Hence, we have
5447 * to enable MII interrupts in order to properly obtain
5448 * async link changes. Unfortunately, this also means that
5449 * we have to read the MAC status register to detect link
5450 * changes, thereby adding an additional register access to
5451 * the interrupt handler.
5452 *
5453	 * XXX: perhaps the link state detection procedure used for
5454	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5455 */
5456
5457 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5458 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5459 status = CSR_READ_4(sc, BGE_MAC_STS);
5460 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5461 mii = device_get_softc(sc->bge_miibus);
5462 mii_pollstat(mii);
5463 if (!sc->bge_link &&
5464 mii->mii_media_status & IFM_ACTIVE &&
5465 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5466 sc->bge_link++;
5467 if (bootverbose)
5468 if_printf(sc->bge_ifp, "link UP\n");
5469 } else if (sc->bge_link &&
5470 (!(mii->mii_media_status & IFM_ACTIVE) ||
5471 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5472 sc->bge_link = 0;
5473 if (bootverbose)
5474 if_printf(sc->bge_ifp, "link DOWN\n");
5475 }
5476
5477 /* Clear the interrupt. */
5478 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5479 BGE_EVTENB_MI_INTERRUPT);
5480 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5481 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5482 BRGPHY_INTRS);
5483 }
5484 return;
5485 }
5486
5487 if (sc->bge_flags & BGE_FLAG_TBI) {
5488 status = CSR_READ_4(sc, BGE_MAC_STS);
5489 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5490 if (!sc->bge_link) {
5491 sc->bge_link++;
5492 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5493 BGE_CLRBIT(sc, BGE_MAC_MODE,
5494 BGE_MACMODE_TBI_SEND_CFGS);
5495 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5496 if (bootverbose)
5497 if_printf(sc->bge_ifp, "link UP\n");
5498 if_link_state_change(sc->bge_ifp,
5499 LINK_STATE_UP);
5500 }
5501 } else if (sc->bge_link) {
5502 sc->bge_link = 0;
5503 if (bootverbose)
5504 if_printf(sc->bge_ifp, "link DOWN\n");
5505 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5506 }
5507 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5508 /*
5509		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
5510		 * bit in the status word always set. Work around this bug by
5511		 * reading the PHY link status directly.
5512 */
5513 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5514
5515 if (link != sc->bge_link ||
5516 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5517 mii = device_get_softc(sc->bge_miibus);
5518 mii_pollstat(mii);
5519 if (!sc->bge_link &&
5520 mii->mii_media_status & IFM_ACTIVE &&
5521 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5522 sc->bge_link++;
5523 if (bootverbose)
5524 if_printf(sc->bge_ifp, "link UP\n");
5525 } else if (sc->bge_link &&
5526 (!(mii->mii_media_status & IFM_ACTIVE) ||
5527 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5528 sc->bge_link = 0;
5529 if (bootverbose)
5530 if_printf(sc->bge_ifp, "link DOWN\n");
5531 }
5532 }
5533 } else {
5534 /*
5535 * For controllers that call mii_tick, we have to poll
5536 * link status.
5537 */
5538 mii = device_get_softc(sc->bge_miibus);
5539 mii_pollstat(mii);
5540 bge_miibus_statchg(sc->bge_dev);
5541 }
5542
5543 /* Clear the attention. */
5544 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5545 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5546 BGE_MACSTAT_LINK_CHANGED);
5547}
5548
5549static void
5550bge_add_sysctls(struct bge_softc *sc)
5551{
5552 struct sysctl_ctx_list *ctx;
5553 struct sysctl_oid_list *children;
5554 char tn[32];
5555 int unit;
5556
5557 ctx = device_get_sysctl_ctx(sc->bge_dev);
5558 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5559
5560#ifdef BGE_REGISTER_DEBUG
5561 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5562 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5563 "Debug Information");
5564
5565 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5566 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5567 "Register Read");
5568
5569 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5570 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5571 "Memory Read");
5572
5573#endif
5574
5575 unit = device_get_unit(sc->bge_dev);
5576 /*
5577 * A common design characteristic for many Broadcom client controllers
5578 * is that they only support a single outstanding DMA read operation
5579 * on the PCIe bus. This means that it will take twice as long to fetch
5580 * a TX frame that is split into header and payload buffers as it does
5581 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5582 * these controllers, coalescing buffers to reduce the number of memory
5583	 * reads is an effective way to get maximum performance (about 940Mbps).
5584	 * Without collapsing TX buffers, the maximum TCP bulk transfer
5585	 * performance is about 850Mbps. However, forced coalescing of mbufs
5586 * consumes a lot of CPU cycles, so leave it off by default.
5587 */
5588 sc->bge_forced_collapse = 0;
5589 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5590 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5591 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5592 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5593 "Number of fragmented TX buffers of a frame allowed before "
5594 "forced collapsing");
5595
5596 /*
5597 * It seems all Broadcom controllers have a bug that can generate UDP
5598 * datagrams with checksum value 0 when TX UDP checksum offloading is
5599	 * enabled. Generating a UDP checksum value of 0 violates RFC 768.
5600	 * Even though the probability of generating such UDP datagrams is
5601	 * low, I don't want to see FreeBSD boxes injecting such datagrams
5602	 * into the network, so disable UDP checksum offloading by default.
5603	 * Users can still override this behavior by setting a sysctl
5604	 * variable, dev.bge.0.forced_udpcsum.
5605 */
5606 sc->bge_forced_udpcsum = 0;
5607 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5608 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5609 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5610 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5611 "Enable UDP checksum offloading even if controller can "
5612 "generate UDP checksum value 0");
5613
5614 if (BGE_IS_5705_PLUS(sc))
5615 bge_add_sysctl_stats_regs(sc, ctx, children);
5616 else
5617 bge_add_sysctl_stats(sc, ctx, children);
5618}
5619
5620#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5621 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5622 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5623 desc)
5624
5625static void
5626bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5627 struct sysctl_oid_list *parent)
5628{
5629 struct sysctl_oid *tree;
5630 struct sysctl_oid_list *children, *schildren;
5631
5632 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5633 NULL, "BGE Statistics");
5634 schildren = children = SYSCTL_CHILDREN(tree);
5635 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5636 children, COSFramesDroppedDueToFilters,
5637 "FramesDroppedDueToFilters");
5638 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5639 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5640 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5641 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5642 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5643 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5644 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5645 children, ifInDiscards, "InputDiscards");
5646 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5647 children, ifInErrors, "InputErrors");
5648 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5649 children, nicRecvThresholdHit, "RecvThresholdHit");
5650 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5651 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5652 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5653 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5654 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5655 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5656 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5657 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5658 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5659 children, nicRingStatusUpdate, "RingStatusUpdate");
5660 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5661 children, nicInterrupts, "Interrupts");
5662 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5663 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5664 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5665 children, nicSendThresholdHit, "SendThresholdHit");
5666
5667 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5668 NULL, "BGE RX Statistics");
5669 children = SYSCTL_CHILDREN(tree);
5670 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5671 children, rxstats.ifHCInOctets, "ifHCInOctets");
5672 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5673 children, rxstats.etherStatsFragments, "Fragments");
5674 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5675 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5676 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5677 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5678 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5679 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5680 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5681 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5682 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5683 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5684 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5685 children, rxstats.xoffPauseFramesReceived,
5686 "xoffPauseFramesReceived");
5687 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5688 children, rxstats.macControlFramesReceived,
5689 "ControlFramesReceived");
5690 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5691 children, rxstats.xoffStateEntered, "xoffStateEntered");
5692 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5693 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5694 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5695 children, rxstats.etherStatsJabbers, "Jabbers");
5696 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5697 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5698 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5699 children, rxstats.inRangeLengthError, "inRangeLengthError");
5700 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5701 children, rxstats.outRangeLengthError, "outRangeLengthError");
5702
5703 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5704 NULL, "BGE TX Statistics");
5705 children = SYSCTL_CHILDREN(tree);
5706 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5707 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5708 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5709 children, txstats.etherStatsCollisions, "Collisions");
5710 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5711 children, txstats.outXonSent, "XonSent");
5712 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5713 children, txstats.outXoffSent, "XoffSent");
5714 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5715 children, txstats.flowControlDone, "flowControlDone");
5716 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5717 children, txstats.dot3StatsInternalMacTransmitErrors,
5718 "InternalMacTransmitErrors");
5719 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5720 children, txstats.dot3StatsSingleCollisionFrames,
5721 "SingleCollisionFrames");
5722 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5723 children, txstats.dot3StatsMultipleCollisionFrames,
5724 "MultipleCollisionFrames");
5725 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5726 children, txstats.dot3StatsDeferredTransmissions,
5727 "DeferredTransmissions");
5728 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5729 children, txstats.dot3StatsExcessiveCollisions,
5730 "ExcessiveCollisions");
5731 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5732 children, txstats.dot3StatsLateCollisions,
5733 "LateCollisions");
5734 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5735 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5736 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5737 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5738 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5739 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5740 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5741 children, txstats.dot3StatsCarrierSenseErrors,
5742 "CarrierSenseErrors");
5743 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5744 children, txstats.ifOutDiscards, "Discards");
5745 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5746 children, txstats.ifOutErrors, "Errors");
5747}
5748
5749#undef BGE_SYSCTL_STAT
5750
5751#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5752 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5753
5754static void
5755bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5756 struct sysctl_oid_list *parent)
5757{
5758 struct sysctl_oid *tree;
5759 struct sysctl_oid_list *child, *schild;
5760 struct bge_mac_stats *stats;
5761
5762 stats = &sc->bge_mac_stats;
5763 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5764 NULL, "BGE Statistics");
5765 schild = child = SYSCTL_CHILDREN(tree);
5766 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5767 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5768 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5769 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5770 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5771 &stats->DmaWriteHighPriQueueFull,
5772 "NIC DMA Write High Priority Queue Full");
5773 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5774 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5775 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5776 &stats->InputDiscards, "Discarded Input Frames");
5777 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5778 &stats->InputErrors, "Input Errors");
5779 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5780 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5781
5782 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5783 NULL, "BGE RX Statistics");
5784 child = SYSCTL_CHILDREN(tree);
5785 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5786 &stats->ifHCInOctets, "Inbound Octets");
5787 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5788 &stats->etherStatsFragments, "Fragments");
5789 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5790 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5791 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5792 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5793 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5794 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5795 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5796 &stats->dot3StatsFCSErrors, "FCS Errors");
5797 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5798 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5799 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5800 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5801 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5802 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5803 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5804 &stats->macControlFramesReceived, "MAC Control Frames Received");
5805 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5806 &stats->xoffStateEntered, "XOFF State Entered");
5807 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5808 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5809 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5810 &stats->etherStatsJabbers, "Jabbers");
5811 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5812 &stats->etherStatsUndersizePkts, "Undersized Packets");
5813
5814 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5815 NULL, "BGE TX Statistics");
5816 child = SYSCTL_CHILDREN(tree);
5817 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5818 &stats->ifHCOutOctets, "Outbound Octets");
5819 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5820 &stats->etherStatsCollisions, "TX Collisions");
5821 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5822 &stats->outXonSent, "XON Sent");
5823 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5824 &stats->outXoffSent, "XOFF Sent");
5825 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5826 &stats->dot3StatsInternalMacTransmitErrors,
5827 "Internal MAC TX Errors");
5828 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5829 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5830 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5831 &stats->dot3StatsMultipleCollisionFrames,
5832 "Multiple Collision Frames");
5833 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5834 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5835 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5836 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5837 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5838 &stats->dot3StatsLateCollisions, "Late Collisions");
5839 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5840 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5841 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5842 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5843 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5844 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5845}
5846
5847#undef BGE_SYSCTL_STAT_ADD64
5848
5849static int
5850bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5851{
5852 struct bge_softc *sc;
5853 uint32_t result;
5854 int offset;
5855
5856 sc = (struct bge_softc *)arg1;
5857 offset = arg2;
5858 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5859 offsetof(bge_hostaddr, bge_addr_lo));
5860 return (sysctl_handle_int(oidp, &result, 0, req));
5861}
5862
5863#ifdef BGE_REGISTER_DEBUG
5864static int
5865bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5866{
5867 struct bge_softc *sc;
5868 uint16_t *sbdata;
5869 int error, result, sbsz;
5870 int i, j;
5871
5872 result = -1;
5873 error = sysctl_handle_int(oidp, &result, 0, req);
5874 if (error || (req->newptr == NULL))
5875 return (error);
5876
5877 if (result == 1) {
5878 sc = (struct bge_softc *)arg1;
5879
5880 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5881 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
5882 sbsz = BGE_STATUS_BLK_SZ;
5883 else
5884 sbsz = 32;
5885 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5886 printf("Status Block:\n");
5887 BGE_LOCK(sc);
5888 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
5889 sc->bge_cdata.bge_status_map,
5890 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
5891 for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
5892 printf("%06x:", i);
5893 for (j = 0; j < 8; j++)
5894 printf(" %04x", sbdata[i++]);
5895 printf("\n");
5896 }
5897
5898 printf("Registers:\n");
5899 for (i = 0x800; i < 0xA00; ) {
5900 printf("%06x:", i);
5901 for (j = 0; j < 8; j++) {
5902 printf(" %08x", CSR_READ_4(sc, i));
5903 i += 4;
5904 }
5905 printf("\n");
5906 }
5907 BGE_UNLOCK(sc);
5908
5909 printf("Hardware Flags:\n");
5910 if (BGE_IS_5717_PLUS(sc))
5911 printf(" - 5717 Plus\n");
5912 if (BGE_IS_5755_PLUS(sc))
5913 printf(" - 5755 Plus\n");
5914 if (BGE_IS_575X_PLUS(sc))
5915 printf(" - 575X Plus\n");
5916 if (BGE_IS_5705_PLUS(sc))
5917 printf(" - 5705 Plus\n");
5918 if (BGE_IS_5714_FAMILY(sc))
5919 printf(" - 5714 Family\n");
5920 if (BGE_IS_5700_FAMILY(sc))
5921 printf(" - 5700 Family\n");
5922 if (sc->bge_flags & BGE_FLAG_JUMBO)
5923 printf(" - Supports Jumbo Frames\n");
5924 if (sc->bge_flags & BGE_FLAG_PCIX)
5925 printf(" - PCI-X Bus\n");
5926 if (sc->bge_flags & BGE_FLAG_PCIE)
5927 printf(" - PCI Express Bus\n");
5928 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5929 printf(" - No 3 LEDs\n");
5930 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5931 printf(" - RX Alignment Bug\n");
5932 }
5933
5934 return (error);
5935}
5936
5937static int
5938bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5939{
5940 struct bge_softc *sc;
5941 int error;
5942 uint16_t result;
5943 uint32_t val;
5944
5945 result = -1;
5946 error = sysctl_handle_int(oidp, &result, 0, req);
5947 if (error || (req->newptr == NULL))
5948 return (error);
5949
5950 if (result < 0x8000) {
5951 sc = (struct bge_softc *)arg1;
5952 val = CSR_READ_4(sc, result);
5953 printf("reg 0x%06X = 0x%08X\n", result, val);
5954 }
5955
5956 return (error);
5957}
5958
5959static int
5960bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5961{
5962 struct bge_softc *sc;
5963 int error;
5964 uint16_t result;
5965 uint32_t val;
5966
5967 result = -1;
5968 error = sysctl_handle_int(oidp, &result, 0, req);
5969 if (error || (req->newptr == NULL))
5970 return (error);
5971
5972 if (result < 0x8000) {
5973 sc = (struct bge_softc *)arg1;
5974 val = bge_readmem_ind(sc, result);
5975 printf("mem 0x%06X = 0x%08X\n", result, val);
5976 }
5977
5978 return (error);
5979}
5980#endif
5981
5982static int
5983bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5984{
5985
5986 if (sc->bge_flags & BGE_FLAG_EADDR)
5987 return (1);
5988
5989#ifdef __sparc64__
5990 OF_getetheraddr(sc->bge_dev, ether_addr);
5991 return (0);
5992#endif
5993 return (1);
5994}
5995
5996static int
5997bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5998{
5999 uint32_t mac_addr;
6000
6001 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
6002 if ((mac_addr >> 16) == 0x484b) {
6003 ether_addr[0] = (uint8_t)(mac_addr >> 8);
6004 ether_addr[1] = (uint8_t)mac_addr;
6005 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
6006 ether_addr[2] = (uint8_t)(mac_addr >> 24);
6007 ether_addr[3] = (uint8_t)(mac_addr >> 16);
6008 ether_addr[4] = (uint8_t)(mac_addr >> 8);
6009 ether_addr[5] = (uint8_t)mac_addr;
6010 return (0);
6011 }
6012 return (1);
6013}
6014
6015static int
6016bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
6017{
6018 int mac_offset = BGE_EE_MAC_OFFSET;
6019
6020 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6021 mac_offset = BGE_EE_MAC_OFFSET_5906;
6022
6023 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
6024 ETHER_ADDR_LEN));
6025}
6026
6027static int
6028bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
6029{
6030
6031 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6032 return (1);
6033
6034 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
6035 ETHER_ADDR_LEN));
6036}
6037
6038static int
6039bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
6040{
6041 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
6042 /* NOTE: Order is critical */
6043 bge_get_eaddr_fw,
6044 bge_get_eaddr_mem,
6045 bge_get_eaddr_nvram,
6046 bge_get_eaddr_eeprom,
6047 NULL
6048 };
6049 const bge_eaddr_fcn_t *func;
6050
6051 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6052 if ((*func)(sc, eaddr) == 0)
6053 break;
6054 }
6055 return (*func == NULL ? ENXIO : 0);
6056}