Deleted Added
full compact
if_re.c (122689) if_re.c (123019)
1/*
2 * Copyright (c) 1997, 1998-2003
3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
1/*
2 * Copyright (c) 1997, 1998-2003
3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/dev/re/if_re.c 122689 2003-11-14 19:00:32Z sam $");
34__FBSDID("$FreeBSD: head/sys/dev/re/if_re.c 123019 2003-11-28 05:28:29Z imp $");
35
36/*
37 * RealTek 8139C+/8169/8169S/8110S PCI NIC driver
38 *
39 * Written by Bill Paul <wpaul@windriver.com>
40 * Senior Networking Software Engineer
41 * Wind River Systems
42 */
43
44/*
45 * This driver is designed to support RealTek's next generation of
46 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
47 * four devices in this family: the RTL8139C+, the RTL8169, the RTL8169S
48 * and the RTL8110S.
49 *
50 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
51 * with the older 8139 family, however it also supports a special
52 * C+ mode of operation that provides several new performance enhancing
53 * features. These include:
54 *
55 * o Descriptor based DMA mechanism. Each descriptor represents
56 * a single packet fragment. Data buffers may be aligned on
57 * any byte boundary.
58 *
59 * o 64-bit DMA
60 *
61 * o TCP/IP checksum offload for both RX and TX
62 *
63 * o High and normal priority transmit DMA rings
64 *
65 * o VLAN tag insertion and extraction
66 *
67 * o TCP large send (segmentation offload)
68 *
69 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
70 * programming API is fairly straightforward. The RX filtering, EEPROM
71 * access and PHY access is the same as it is on the older 8139 series
72 * chips.
73 *
74 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
75 * same programming API and feature set as the 8139C+ with the following
76 * differences and additions:
77 *
78 * o 1000Mbps mode
79 *
80 * o Jumbo frames
81 *
82 * o GMII and TBI ports/registers for interfacing with copper
83 * or fiber PHYs
84 *
85 * o RX and TX DMA rings can have up to 1024 descriptors
86 * (the 8139C+ allows a maximum of 64)
87 *
88 * o Slight differences in register layout from the 8139C+
89 *
90 * The TX start and timer interrupt registers are at different locations
91 * on the 8169 than they are on the 8139C+. Also, the status word in the
92 * RX descriptor has a slightly different bit layout. The 8169 does not
93 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
94 * copper gigE PHY.
95 *
96 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
97 * (the 'S' stands for 'single-chip'). These devices have the same
98 * programming API as the older 8169, but also have some vendor-specific
99 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
100 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
101 *
102 * This driver takes advantage of the RX and TX checksum offload and
103 * VLAN tag insertion/extraction features. It also implements TX
104 * interrupt moderation using the timer interrupt registers, which
105 * significantly reduces TX interrupt load. There is also support
106 * for jumbo frames, however the 8169/8169S/8110S can not transmit
107 * jumbo frames larger than 7.5K, so the max MTU possible with this
108 * driver is 7500 bytes.
109 */
110
111#include <sys/param.h>
112#include <sys/endian.h>
113#include <sys/systm.h>
114#include <sys/sockio.h>
115#include <sys/mbuf.h>
116#include <sys/malloc.h>
117#include <sys/kernel.h>
118#include <sys/socket.h>
119
120#include <net/if.h>
121#include <net/if_arp.h>
122#include <net/ethernet.h>
123#include <net/if_dl.h>
124#include <net/if_media.h>
125#include <net/if_vlan_var.h>
126
127#include <net/bpf.h>
128
129#include <machine/bus_pio.h>
130#include <machine/bus_memio.h>
131#include <machine/bus.h>
132#include <machine/resource.h>
133#include <sys/bus.h>
134#include <sys/rman.h>
135
136#include <dev/mii/mii.h>
137#include <dev/mii/miivar.h>
138
139#include <dev/pci/pcireg.h>
140#include <dev/pci/pcivar.h>
141
142MODULE_DEPEND(re, pci, 1, 1, 1);
143MODULE_DEPEND(re, ether, 1, 1, 1);
144MODULE_DEPEND(re, miibus, 1, 1, 1);
145
146/* "controller miibus0" required. See GENERIC if you get errors here. */
147#include "miibus_if.h"
148
149/*
150 * Default to using PIO access for this driver.
151 */
152#define RE_USEIOSPACE
153
154#include <pci/if_rlreg.h>
155
156#define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
157
158/*
159 * Various supported device vendors/types and their names.
160 */
161static struct rl_type re_devs[] = {
162 { RT_VENDORID, RT_DEVICEID_8139, RL_HWREV_8139CPLUS,
163 "RealTek 8139C+ 10/100BaseTX" },
164 { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169,
165 "RealTek 8169 Gigabit Ethernet" },
166 { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169S,
167 "RealTek 8169S Single-chip Gigabit Ethernet" },
168 { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8110S,
169 "RealTek 8110S Single-chip Gigabit Ethernet" },
170 { 0, 0, 0, NULL }
171};
172
173static struct rl_hwrev re_hwrevs[] = {
174 { RL_HWREV_8139, RL_8139, "" },
175 { RL_HWREV_8139A, RL_8139, "A" },
176 { RL_HWREV_8139AG, RL_8139, "A-G" },
177 { RL_HWREV_8139B, RL_8139, "B" },
178 { RL_HWREV_8130, RL_8139, "8130" },
179 { RL_HWREV_8139C, RL_8139, "C" },
180 { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C" },
181 { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+"},
182 { RL_HWREV_8169, RL_8169, "8169"},
183 { RL_HWREV_8169S, RL_8169, "8169S"},
184 { RL_HWREV_8110S, RL_8169, "8110S"},
185 { RL_HWREV_8100, RL_8139, "8100"},
186 { RL_HWREV_8101, RL_8139, "8101"},
187 { 0, 0, NULL }
188};
189
190static int re_probe (device_t);
191static int re_attach (device_t);
192static int re_detach (device_t);
193
194static int re_encap (struct rl_softc *, struct mbuf *, int *);
195
196static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int);
197static void re_dma_map_desc (void *, bus_dma_segment_t *, int,
198 bus_size_t, int);
199static int re_allocmem (device_t, struct rl_softc *);
200static int re_newbuf (struct rl_softc *, int, struct mbuf *);
201static int re_rx_list_init (struct rl_softc *);
202static int re_tx_list_init (struct rl_softc *);
203static void re_rxeof (struct rl_softc *);
204static void re_txeof (struct rl_softc *);
205static void re_intr (void *);
206static void re_tick (void *);
207static void re_start (struct ifnet *);
208static int re_ioctl (struct ifnet *, u_long, caddr_t);
209static void re_init (void *);
210static void re_stop (struct rl_softc *);
211static void re_watchdog (struct ifnet *);
212static int re_suspend (device_t);
213static int re_resume (device_t);
214static void re_shutdown (device_t);
215static int re_ifmedia_upd (struct ifnet *);
216static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *);
217
218static void re_eeprom_putbyte (struct rl_softc *, int);
219static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *);
220static void re_read_eeprom (struct rl_softc *, caddr_t, int, int, int);
221static int re_gmii_readreg (device_t, int, int);
222static int re_gmii_writereg (device_t, int, int, int);
223
224static int re_miibus_readreg (device_t, int, int);
225static int re_miibus_writereg (device_t, int, int, int);
226static void re_miibus_statchg (device_t);
227
228static u_int32_t re_mchash (caddr_t);
229static void re_setmulti (struct rl_softc *);
230static void re_reset (struct rl_softc *);
231
232static int re_diag (struct rl_softc *);
233
234#ifdef RE_USEIOSPACE
235#define RL_RES SYS_RES_IOPORT
236#define RL_RID RL_PCI_LOIO
237#else
238#define RL_RES SYS_RES_MEMORY
239#define RL_RID RL_PCI_LOMEM
240#endif
241
242static device_method_t re_methods[] = {
243 /* Device interface */
244 DEVMETHOD(device_probe, re_probe),
245 DEVMETHOD(device_attach, re_attach),
246 DEVMETHOD(device_detach, re_detach),
247 DEVMETHOD(device_suspend, re_suspend),
248 DEVMETHOD(device_resume, re_resume),
249 DEVMETHOD(device_shutdown, re_shutdown),
250
251 /* bus interface */
252 DEVMETHOD(bus_print_child, bus_generic_print_child),
253 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
254
255 /* MII interface */
256 DEVMETHOD(miibus_readreg, re_miibus_readreg),
257 DEVMETHOD(miibus_writereg, re_miibus_writereg),
258 DEVMETHOD(miibus_statchg, re_miibus_statchg),
259
260 { 0, 0 }
261};
262
263static driver_t re_driver = {
264 "re",
265 re_methods,
266 sizeof(struct rl_softc)
267};
268
269static devclass_t re_devclass;
270
271DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
35
36/*
37 * RealTek 8139C+/8169/8169S/8110S PCI NIC driver
38 *
39 * Written by Bill Paul <wpaul@windriver.com>
40 * Senior Networking Software Engineer
41 * Wind River Systems
42 */
43
44/*
45 * This driver is designed to support RealTek's next generation of
46 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
47 * four devices in this family: the RTL8139C+, the RTL8169, the RTL8169S
48 * and the RTL8110S.
49 *
50 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
51 * with the older 8139 family, however it also supports a special
52 * C+ mode of operation that provides several new performance enhancing
53 * features. These include:
54 *
55 * o Descriptor based DMA mechanism. Each descriptor represents
56 * a single packet fragment. Data buffers may be aligned on
57 * any byte boundary.
58 *
59 * o 64-bit DMA
60 *
61 * o TCP/IP checksum offload for both RX and TX
62 *
63 * o High and normal priority transmit DMA rings
64 *
65 * o VLAN tag insertion and extraction
66 *
67 * o TCP large send (segmentation offload)
68 *
69 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
70 * programming API is fairly straightforward. The RX filtering, EEPROM
71 * access and PHY access is the same as it is on the older 8139 series
72 * chips.
73 *
74 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
75 * same programming API and feature set as the 8139C+ with the following
76 * differences and additions:
77 *
78 * o 1000Mbps mode
79 *
80 * o Jumbo frames
81 *
82 * o GMII and TBI ports/registers for interfacing with copper
83 * or fiber PHYs
84 *
85 * o RX and TX DMA rings can have up to 1024 descriptors
86 * (the 8139C+ allows a maximum of 64)
87 *
88 * o Slight differences in register layout from the 8139C+
89 *
90 * The TX start and timer interrupt registers are at different locations
91 * on the 8169 than they are on the 8139C+. Also, the status word in the
92 * RX descriptor has a slightly different bit layout. The 8169 does not
93 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
94 * copper gigE PHY.
95 *
96 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
97 * (the 'S' stands for 'single-chip'). These devices have the same
98 * programming API as the older 8169, but also have some vendor-specific
99 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
100 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
101 *
102 * This driver takes advantage of the RX and TX checksum offload and
103 * VLAN tag insertion/extraction features. It also implements TX
104 * interrupt moderation using the timer interrupt registers, which
105 * significantly reduces TX interrupt load. There is also support
106 * for jumbo frames, however the 8169/8169S/8110S can not transmit
107 * jumbo frames larger than 7.5K, so the max MTU possible with this
108 * driver is 7500 bytes.
109 */
110
111#include <sys/param.h>
112#include <sys/endian.h>
113#include <sys/systm.h>
114#include <sys/sockio.h>
115#include <sys/mbuf.h>
116#include <sys/malloc.h>
117#include <sys/kernel.h>
118#include <sys/socket.h>
119
120#include <net/if.h>
121#include <net/if_arp.h>
122#include <net/ethernet.h>
123#include <net/if_dl.h>
124#include <net/if_media.h>
125#include <net/if_vlan_var.h>
126
127#include <net/bpf.h>
128
129#include <machine/bus_pio.h>
130#include <machine/bus_memio.h>
131#include <machine/bus.h>
132#include <machine/resource.h>
133#include <sys/bus.h>
134#include <sys/rman.h>
135
136#include <dev/mii/mii.h>
137#include <dev/mii/miivar.h>
138
139#include <dev/pci/pcireg.h>
140#include <dev/pci/pcivar.h>
141
142MODULE_DEPEND(re, pci, 1, 1, 1);
143MODULE_DEPEND(re, ether, 1, 1, 1);
144MODULE_DEPEND(re, miibus, 1, 1, 1);
145
146/* "controller miibus0" required. See GENERIC if you get errors here. */
147#include "miibus_if.h"
148
149/*
150 * Default to using PIO access for this driver.
151 */
152#define RE_USEIOSPACE
153
154#include <pci/if_rlreg.h>
155
156#define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
157
158/*
159 * Various supported device vendors/types and their names.
160 */
/*
 * Table of supported PCI vendor/device IDs.  Each entry also carries
 * the hardware revision code (read from the chip at probe time) and
 * the description string reported when the device is claimed.
 * The table is terminated by an all-zero sentinel entry.
 */
static struct rl_type re_devs[] = {
	{ RT_VENDORID, RT_DEVICEID_8139, RL_HWREV_8139CPLUS,
		"RealTek 8139C+ 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169,
		"RealTek 8169 Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169S,
		"RealTek 8169S Single-chip Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8110S,
		"RealTek 8110S Single-chip Gigabit Ethernet" },
	{ 0, 0, 0, NULL }	/* sentinel */
};
172
/*
 * Map of hardware revision codes to chip families (RL_8139,
 * RL_8139CPLUS, RL_8169) and a human-readable revision suffix.
 * Terminated by an all-zero sentinel entry.
 */
static struct rl_hwrev re_hwrevs[] = {
	{ RL_HWREV_8139, RL_8139, "" },
	{ RL_HWREV_8139A, RL_8139, "A" },
	{ RL_HWREV_8139AG, RL_8139, "A-G" },
	{ RL_HWREV_8139B, RL_8139, "B" },
	{ RL_HWREV_8130, RL_8139, "8130" },
	{ RL_HWREV_8139C, RL_8139, "C" },
	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C" },
	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+"},
	{ RL_HWREV_8169, RL_8169, "8169"},
	{ RL_HWREV_8169S, RL_8169, "8169S"},
	{ RL_HWREV_8110S, RL_8169, "8110S"},
	{ RL_HWREV_8100, RL_8139, "8100"},
	{ RL_HWREV_8101, RL_8139, "8101"},
	{ 0, 0, NULL }	/* sentinel */
};
189
190static int re_probe (device_t);
191static int re_attach (device_t);
192static int re_detach (device_t);
193
194static int re_encap (struct rl_softc *, struct mbuf *, int *);
195
196static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int);
197static void re_dma_map_desc (void *, bus_dma_segment_t *, int,
198 bus_size_t, int);
199static int re_allocmem (device_t, struct rl_softc *);
200static int re_newbuf (struct rl_softc *, int, struct mbuf *);
201static int re_rx_list_init (struct rl_softc *);
202static int re_tx_list_init (struct rl_softc *);
203static void re_rxeof (struct rl_softc *);
204static void re_txeof (struct rl_softc *);
205static void re_intr (void *);
206static void re_tick (void *);
207static void re_start (struct ifnet *);
208static int re_ioctl (struct ifnet *, u_long, caddr_t);
209static void re_init (void *);
210static void re_stop (struct rl_softc *);
211static void re_watchdog (struct ifnet *);
212static int re_suspend (device_t);
213static int re_resume (device_t);
214static void re_shutdown (device_t);
215static int re_ifmedia_upd (struct ifnet *);
216static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *);
217
218static void re_eeprom_putbyte (struct rl_softc *, int);
219static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *);
220static void re_read_eeprom (struct rl_softc *, caddr_t, int, int, int);
221static int re_gmii_readreg (device_t, int, int);
222static int re_gmii_writereg (device_t, int, int, int);
223
224static int re_miibus_readreg (device_t, int, int);
225static int re_miibus_writereg (device_t, int, int, int);
226static void re_miibus_statchg (device_t);
227
228static u_int32_t re_mchash (caddr_t);
229static void re_setmulti (struct rl_softc *);
230static void re_reset (struct rl_softc *);
231
232static int re_diag (struct rl_softc *);
233
234#ifdef RE_USEIOSPACE
235#define RL_RES SYS_RES_IOPORT
236#define RL_RID RL_PCI_LOIO
237#else
238#define RL_RES SYS_RES_MEMORY
239#define RL_RID RL_PCI_LOMEM
240#endif
241
/*
 * Method table binding this driver's entry points to the newbus
 * device, bus and MII interfaces.
 */
static device_method_t re_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		re_probe),
	DEVMETHOD(device_attach,	re_attach),
	DEVMETHOD(device_detach,	re_detach),
	DEVMETHOD(device_suspend,	re_suspend),
	DEVMETHOD(device_resume,	re_resume),
	DEVMETHOD(device_shutdown,	re_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	re_miibus_readreg),
	DEVMETHOD(miibus_writereg,	re_miibus_writereg),
	DEVMETHOD(miibus_statchg,	re_miibus_statchg),

	{ 0, 0 }	/* terminator */
};
262
/* Driver declaration: name, method table, and softc size for newbus. */
static driver_t re_driver = {
	"re",
	re_methods,
	sizeof(struct rl_softc)
};
268
269static devclass_t re_devclass;
270
271DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
272DRIVER_MODULE(re, cardbus, re_driver, re_devclass, 0, 0);
272DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);
273
274#define EE_SET(x) \
275 CSR_WRITE_1(sc, RL_EECMD, \
276 CSR_READ_1(sc, RL_EECMD) | x)
277
278#define EE_CLR(x) \
279 CSR_WRITE_1(sc, RL_EECMD, \
280 CSR_READ_1(sc, RL_EECMD) & ~x)
281
282/*
283 * Send a read command and address to the EEPROM, check for ACK.
284 */
285static void
286re_eeprom_putbyte(sc, addr)
287 struct rl_softc *sc;
288 int addr;
289{
290 register int d, i;
291
292 d = addr | sc->rl_eecmd_read;
293
294 /*
295 * Feed in each bit and strobe the clock.
296 */
297 for (i = 0x400; i; i >>= 1) {
298 if (d & i) {
299 EE_SET(RL_EE_DATAIN);
300 } else {
301 EE_CLR(RL_EE_DATAIN);
302 }
303 DELAY(100);
304 EE_SET(RL_EE_CLK);
305 DELAY(150);
306 EE_CLR(RL_EE_CLK);
307 DELAY(100);
308 }
309
310 return;
311}
312
313/*
314 * Read a word of data stored in the EEPROM at address 'addr.'
315 */
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 *
 * Bit-bangs the serial EEPROM interface through the RL_EECMD CSR:
 * enters programming mode, clocks out the read command/address via
 * re_eeprom_putbyte(), then clocks in 16 data bits MSB-first.
 * The result is stored through 'dest'.
 */
static void
re_eeprom_getword(sc, addr, dest)
	struct rl_softc		*sc;
	int			addr;
	u_int16_t		*dest;
{
	register int		i;
	u_int16_t		word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	/* Re-assert chip select before clocking the data back in. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Start reading bits from EEPROM, most significant bit first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	*dest = word;

	return;
}
354
355/*
356 * Read a sequence of words from the EEPROM.
357 */
358static void
359re_read_eeprom(sc, dest, off, cnt, swap)
360 struct rl_softc *sc;
361 caddr_t dest;
362 int off;
363 int cnt;
364 int swap;
365{
366 int i;
367 u_int16_t word = 0, *ptr;
368
369 for (i = 0; i < cnt; i++) {
370 re_eeprom_getword(sc, off + i, &word);
371 ptr = (u_int16_t *)(dest + (i * 2));
372 if (swap)
373 *ptr = ntohs(word);
374 else
375 *ptr = word;
376 }
377
378 return;
379}
380
/*
 * Read a PHY register on the 8169 family through the GMII access
 * CSR (RL_PHYAR).  Only the single internal GMII PHY at address 1
 * is supported; reads for any other address return 0.
 */
static int
re_gmii_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct rl_softc		*sc;
	u_int32_t		rval;
	int			i;

	/* Only one PHY, pretend it lives at address 1. */
	if (phy != 1)
		return(0);

	sc = device_get_softc(dev);

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RL_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
		return(rval);
	}

	/* Post the register number; the chip latches the result. */
	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
	DELAY(1000);

	/*
	 * Poll for completion.  NOTE(review): for reads the code waits
	 * for RL_PHYAR_BUSY to become SET (the opposite polarity of the
	 * write path below) — this matches the original driver; confirm
	 * against the RealTek datasheet before changing.
	 */
	for (i = 0; i < RL_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(100);
	}

	if (i == RL_TIMEOUT) {
		printf ("re%d: PHY read failed\n", sc->rl_unit);
		return (0);
	}

	/* Low 16 bits of PHYAR hold the register data. */
	return (rval & RL_PHYAR_PHYDATA);
}
419
/*
 * Write a PHY register on the 8169 family through the GMII access
 * CSR (RL_PHYAR).  The register number goes in the upper half of
 * the CSR, the data in the lower half, with RL_PHYAR_BUSY set to
 * start the cycle.  Returns 0 in all cases (miibus convention).
 */
static int
re_gmii_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct rl_softc		*sc;
	u_int32_t		rval;
	int			i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
	DELAY(1000);

	/* Wait for the chip to clear BUSY, indicating the write is done. */
	for (i = 0; i < RL_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(100);
	}

	if (i == RL_TIMEOUT) {
		printf ("re%d: PHY write failed\n", sc->rl_unit);
		return (0);
	}

	return (0);
}
449
/*
 * miibus read-register entry point.  For the 8169 family the access
 * is delegated to re_gmii_readreg().  For the 8139C+ the internal
 * PHY's registers are mapped directly onto chip CSRs, so the MII
 * register number is translated to a CSR offset and read directly.
 * The softc lock is taken on entry and released on every exit path.
 */
static int
re_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct rl_softc		*sc;
	u_int16_t		rval = 0;
	u_int16_t		re8139_reg = 0;

	sc = device_get_softc(dev);
	RL_LOCK(sc);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_readreg(dev, phy, reg);
		RL_UNLOCK(sc);
		return (rval);
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		RL_UNLOCK(sc);
		return(0);
	}
	/* Translate the MII register number to the mapped CSR. */
	switch(reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		/* The mapped PHY has no ID registers; report zero. */
		RL_UNLOCK(sc);
		return(0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register. If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		RL_UNLOCK(sc);
		return(rval);
	default:
		printf("re%d: bad phy register\n", sc->rl_unit);
		RL_UNLOCK(sc);
		return(0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	RL_UNLOCK(sc);
	return(rval);
}
512
513static int
514re_miibus_writereg(dev, phy, reg, data)
515 device_t dev;
516 int phy, reg, data;
517{
518 struct rl_softc *sc;
519 u_int16_t re8139_reg = 0;
520 int rval = 0;
521
522 sc = device_get_softc(dev);
523 RL_LOCK(sc);
524
525 if (sc->rl_type == RL_8169) {
526 rval = re_gmii_writereg(dev, phy, reg, data);
527 RL_UNLOCK(sc);
528 return (rval);
529 }
530
531 /* Pretend the internal PHY is only at address 0 */
532 if (phy) {
533 RL_UNLOCK(sc);
534 return(0);
535 }
536 switch(reg) {
537 case MII_BMCR:
538 re8139_reg = RL_BMCR;
539 break;
540 case MII_BMSR:
541 re8139_reg = RL_BMSR;
542 break;
543 case MII_ANAR:
544 re8139_reg = RL_ANAR;
545 break;
546 case MII_ANER:
547 re8139_reg = RL_ANER;
548 break;
549 case MII_ANLPAR:
550 re8139_reg = RL_LPAR;
551 break;
552 case MII_PHYIDR1:
553 case MII_PHYIDR2:
554 RL_UNLOCK(sc);
555 return(0);
556 break;
557 default:
558 printf("re%d: bad phy register\n", sc->rl_unit);
559 RL_UNLOCK(sc);
560 return(0);
561 }
562 CSR_WRITE_2(sc, re8139_reg, data);
563 RL_UNLOCK(sc);
564 return(0);
565}
566
567static void
568re_miibus_statchg(dev)
569 device_t dev;
570{
571 return;
572}
573
574/*
575 * Calculate CRC of a multicast group address, return the upper 6 bits.
576 */
577static u_int32_t
578re_mchash(addr)
579 caddr_t addr;
580{
581 u_int32_t crc, carry;
582 int idx, bit;
583 u_int8_t data;
584
585 /* Compute CRC for the address value. */
586 crc = 0xFFFFFFFF; /* initial value */
587
588 for (idx = 0; idx < 6; idx++) {
589 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
590 carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
591 crc <<= 1;
592 if (carry)
593 crc = (crc ^ 0x04c11db6) | carry;
594 }
595 }
596
597 /* return the filter bit position */
598 return(crc >> 26);
599}
600
601/*
602 * Program the 64-bit multicast hash filter.
603 */
/*
 * Program the 64-bit multicast hash filter.
 *
 * In ALLMULTI/PROMISC mode both halves of the filter (MAR0/MAR4)
 * are set to all-ones so every multicast frame is accepted.
 * Otherwise the filter is cleared and rebuilt from the interface's
 * current multicast address list, and multicast reception is
 * enabled in RL_RXCFG only if at least one address was programmed.
 */
static void
re_setmulti(sc)
	struct rl_softc		*sc;
{
	struct ifnet		*ifp;
	int			h = 0;
	u_int32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;
	u_int32_t		rxfilt;
	int			mcnt = 0;

	ifp = &sc->arpcom.ac_if;

	rxfilt = CSR_READ_4(sc, RL_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= RL_RXCFG_RX_MULTI;
		CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
		CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, RL_MAR0, 0);
	CSR_WRITE_4(sc, RL_MAR4, 0);

	/* now program new ones */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = re_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		/* re_mchash() returns 0..63; split across the two MARs. */
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}

	if (mcnt)
		rxfilt |= RL_RXCFG_RX_MULTI;
	else
		rxfilt &= ~RL_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);

	return;
}
654
655static void
656re_reset(sc)
657 struct rl_softc *sc;
658{
659 register int i;
660
661 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
662
663 for (i = 0; i < RL_TIMEOUT; i++) {
664 DELAY(10);
665 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
666 break;
667 }
668 if (i == RL_TIMEOUT)
669 printf("re%d: reset never completed!\n", sc->rl_unit);
670
671 CSR_WRITE_1(sc, 0x82, 1);
672
673 return;
674}
675
676/*
677 * The following routine is designed to test for a defect on some
678 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
679 * lines connected to the bus, however for a 32-bit only card, they
680 * should be pulled high. The result of this defect is that the
681 * NIC will not work right if you plug it into a 64-bit slot: DMA
682 * operations will be done with 64-bit transfers, which will fail
683 * because the 64-bit data lines aren't connected.
684 *
685 * There's no way to work around this (short of talking a soldering
686 * iron to the board), however we can detect it. The method we use
687 * here is to put the NIC into digital loopback mode, set the receiver
688 * to promiscuous mode, and then try to send a frame. We then compare
689 * the frame data we sent to what was received. If the data matches,
690 * then the NIC is working correctly, otherwise we know the user has
691 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
692 * slot. In the latter case, there's no way the NIC can work correctly,
693 * so we print out a message on the console and abort the device attach.
694 */
695
/*
 * Digital-loopback self test (see the block comment above): bring the
 * NIC up in test mode, transmit one hand-built Ethernet frame, and
 * verify that the frame received via loopback matches byte-for-byte.
 * Returns 0 on success or EIO if the frame is never received, is
 * short, or its contents don't match (indicating the 32-bit-card-in-
 * 64-bit-slot DMA defect).  Leaves the interface stopped.
 */
static int
re_diag(sc)
	struct rl_softc		*sc;
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mbuf		*m0;
	struct ether_header	*eh;
	struct rl_desc		*cur_rx;
	u_int16_t		status;
	u_int32_t		rxstat;
	int			total_len, i, error = 0;
	u_int8_t		dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	u_int8_t		src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return(ENOBUFS);

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->rl_testmode = 1;
	/* init/stop/init cycle — kept from the original driver;
	 * presumably needed to settle the chip before the test. */
	re_init(sc);
	re_stop(sc);
	DELAY(100000);
	re_init(sc);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 * Note: IF_HANDOFF() ultimately calls re_start() for us.
	 */

	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
	IF_HANDOFF(&ifp->if_snd, m0, ifp);
	m0 = NULL;	/* ownership passed to the send queue */

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RL_ISR);
		/* Require both "RX OK" and "timer expired" before looking
		 * at the RX ring. */
		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
			break;
		DELAY(10);
	}

	if (i == RL_TIMEOUT) {
		printf("re%d: diagnostic failed, failed to receive packet "
		    "in loopback mode\n", sc->rl_unit);
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->rl_ldata.rl_mtag,
	    sc->rl_ldata.rl_rx_dmamap[0],
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->rl_ldata.rl_mtag,
	    sc->rl_ldata.rl_rx_dmamap[0]);

	/* Take ownership of the received mbuf from ring slot 0. */
	m0 = sc->rl_ldata.rl_rx_mbuf[0];
	sc->rl_ldata.rl_rx_mbuf[0] = NULL;
	eh = mtod(m0, struct ether_header *);

	cur_rx = &sc->rl_ldata.rl_rx_list[0];
	total_len = RL_RXBYTES(cur_rx);
	rxstat = le32toh(cur_rx->rl_cmdstat);

	if (total_len != ETHER_MIN_LEN) {
		printf("re%d: diagnostic failed, received short packet\n",
		    sc->rl_unit);
		error = EIO;
		goto done;
	}

	/* Test that the received packet data matches what we sent. */

	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		printf("re%d: WARNING, DMA FAILURE!\n", sc->rl_unit);
		printf("re%d: expected TX data: %6D/%6D/0x%x\n", sc->rl_unit,
		    dst, ":", src, ":", ETHERTYPE_IP);
		printf("re%d: received RX data: %6D/%6D/0x%x\n", sc->rl_unit,
		    eh->ether_dhost, ":", eh->ether_shost, ":",
		    ntohs(eh->ether_type));
		printf("re%d: You may have a defective 32-bit NIC plugged "
		    "into a 64-bit PCI slot.\n", sc->rl_unit);
		printf("re%d: Please re-install the NIC in a 32-bit slot "
		    "for proper operation.\n", sc->rl_unit);
		printf("re%d: Read the re(4) man page for more details.\n",
		    sc->rl_unit);
		error = EIO;
	}

done:
	/* Turn interface off, release resources */

	sc->rl_testmode = 0;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(sc);
	if (m0 != NULL)
		m_freem(m0);

	return (error);
}
827
828/*
829 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
830 * IDs against our list and return a device name if we find a match.
831 */
static int
re_probe(dev)
	device_t dev;
{
	struct rl_type *t;
	struct rl_softc *sc;
	int rid;
	u_int32_t hwrev;

	t = re_devs;
	sc = device_get_softc(dev);

	/* Walk the supported-device table looking for a vendor/device match. */
	while(t->rl_name != NULL) {
		if ((pci_get_vendor(dev) == t->rl_vid) &&
		    (pci_get_device(dev) == t->rl_did)) {

			/*
			 * Temporarily map the I/O space
			 * so we can read the chip ID register.
			 */
			rid = RL_RID;
			sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid,
			    0, ~0, 1, RF_ACTIVE);
			if (sc->rl_res == NULL) {
				device_printf(dev,
				    "couldn't map ports/memory\n");
				return(ENXIO);
			}
			sc->rl_btag = rman_get_bustag(sc->rl_res);
			sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
			/*
			 * The mutex is created and destroyed here on every
			 * matching table entry: it only needs to live long
			 * enough to cover the single hardware-revision
			 * register read below.  re_attach() initializes the
			 * long-lived instance later.
			 */
			mtx_init(&sc->rl_mtx,
			    device_get_nameunit(dev),
			    MTX_NETWORK_LOCK, MTX_DEF);
			RL_LOCK(sc);
			hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
			bus_release_resource(dev, RL_RES,
			    RL_RID, sc->rl_res);
			RL_UNLOCK(sc);
			mtx_destroy(&sc->rl_mtx);
			/*
			 * Several table entries can share a vendor/device ID
			 * pair; the hardware revision read from the chip
			 * itself disambiguates them.
			 */
			if (t->rl_basetype == hwrev) {
				device_set_desc(dev, t->rl_name);
				return(0);
			}
		}
		t++;
	}

	/* No matching vendor/device/revision combination found. */
	return(ENXIO);
}
881
882/*
883 * This routine takes the segment list provided as the result of
884 * a bus_dma_map_load() operation and assigns the addresses/lengths
885 * to RealTek DMA descriptors. This can be called either by the RX
886 * code or the TX code. In the RX case, we'll probably wind up mapping
887 * at most one segment. For the TX case, there could be any number of
888 * segments since TX packets may span multiple mbufs. In either case,
889 * if the number of segments is larger than the rl_maxsegs limit
890 * specified by the caller, we abort the mapping operation. Sadly,
891 * whoever designed the buffer mapping API did not provide a way to
892 * return an error from here, so we have to fake it a bit.
893 */
894
static void
re_dma_map_desc(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct rl_dmaload_arg *ctx;
	struct rl_desc *d = NULL;
	int i = 0, idx;

	/* bus_dmamap_load() reported a failure: leave the ring untouched. */
	if (error)
		return;

	ctx = arg;

	/*
	 * Signal error to caller if there's too many segments.
	 * (The callback API has no return value, so rl_maxsegs == 0
	 * is the out-of-band error indication the caller checks.)
	 */
	if (nseg > ctx->rl_maxsegs) {
		ctx->rl_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it start transmission or
	 * reception.)
	 */
	idx = ctx->rl_idx;
	while(1) {
		u_int32_t cmdstat;
		d = &ctx->rl_ring[idx];
		/* A descriptor still owned by the chip means the ring is
		 * full; report failure via rl_maxsegs == 0 as above. */
		if (le32toh(d->rl_cmdstat) & RL_RDESC_STAT_OWN) {
			ctx->rl_maxsegs = 0;
			return;
		}
		cmdstat = segs[i].ds_len;
		d->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
		d->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
		if (i == 0)
			cmdstat |= RL_TDESC_CMD_SOF;
		else
			cmdstat |= RL_TDESC_CMD_OWN;
		/*
		 * NOTE(review): the end-of-ring test uses RL_RX_DESC_CNT
		 * even when this callback is mapping TX descriptors --
		 * this relies on RL_RX_DESC_CNT == RL_TX_DESC_CNT; verify
		 * against the register header if the counts ever diverge.
		 */
		if (idx == (RL_RX_DESC_CNT - 1))
			cmdstat |= RL_TDESC_CMD_EOR;
		d->rl_cmdstat = htole32(cmdstat | ctx->rl_flags);
		i++;
		if (i == nseg)
			break;
		RL_DESC_INC(idx);
	}

	/* Tag the last descriptor written as the end of the frame. */
	d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
	/* Report success: segment count used and final descriptor index. */
	ctx->rl_maxsegs = nseg;
	ctx->rl_idx = idx;

	return;
}
959
960/*
961 * Map a single buffer address.
962 */
963
964static void
965re_dma_map_addr(arg, segs, nseg, error)
966 void *arg;
967 bus_dma_segment_t *segs;
968 int nseg;
969 int error;
970{
971 u_int32_t *addr;
972
973 if (error)
974 return;
975
976 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
977 addr = arg;
978 *addr = segs->ds_addr;
979
980 return;
981}
982
983static int
984re_allocmem(dev, sc)
985 device_t dev;
986 struct rl_softc *sc;
987{
988 int error;
989 int nseg;
990 int i;
991
992 /*
993 * Allocate map for RX mbufs.
994 */
995 nseg = 32;
996 error = bus_dma_tag_create(sc->rl_parent_tag, ETHER_ALIGN, 0,
997 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
998 NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
999 NULL, NULL, &sc->rl_ldata.rl_mtag);
1000 if (error) {
1001 device_printf(dev, "could not allocate dma tag\n");
1002 return (ENOMEM);
1003 }
1004
1005 /*
1006 * Allocate map for TX descriptor list.
1007 */
1008 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1009 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1010 NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
1011 NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
1012 if (error) {
1013 device_printf(dev, "could not allocate dma tag\n");
1014 return (ENOMEM);
1015 }
1016
1017 /* Allocate DMA'able memory for the TX ring */
1018
1019 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
1020 (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1021 &sc->rl_ldata.rl_tx_list_map);
1022 if (error)
1023 return (ENOMEM);
1024
1025 /* Load the map for the TX ring. */
1026
1027 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
1028 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
1029 RL_TX_LIST_SZ, re_dma_map_addr,
1030 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
1031
1032 /* Create DMA maps for TX buffers */
1033
1034 for (i = 0; i < RL_TX_DESC_CNT; i++) {
1035 error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0,
1036 &sc->rl_ldata.rl_tx_dmamap[i]);
1037 if (error) {
1038 device_printf(dev, "can't create DMA map for TX\n");
1039 return(ENOMEM);
1040 }
1041 }
1042
1043 /*
1044 * Allocate map for RX descriptor list.
1045 */
1046 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1047 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1048 NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
1049 NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
1050 if (error) {
1051 device_printf(dev, "could not allocate dma tag\n");
1052 return (ENOMEM);
1053 }
1054
1055 /* Allocate DMA'able memory for the RX ring */
1056
1057 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
1058 (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1059 &sc->rl_ldata.rl_rx_list_map);
1060 if (error)
1061 return (ENOMEM);
1062
1063 /* Load the map for the RX ring. */
1064
1065 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
1066 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
1067 RL_TX_LIST_SZ, re_dma_map_addr,
1068 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
1069
1070 /* Create DMA maps for RX buffers */
1071
1072 for (i = 0; i < RL_RX_DESC_CNT; i++) {
1073 error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0,
1074 &sc->rl_ldata.rl_rx_dmamap[i]);
1075 if (error) {
1076 device_printf(dev, "can't create DMA map for RX\n");
1077 return(ENOMEM);
1078 }
1079 }
1080
1081 return(0);
1082}
1083
1084/*
1085 * Attach the interface. Allocate softc structures, do ifmedia
1086 * setup and ethernet/BPF attach.
1087 */
1088static int
1089re_attach(dev)
1090 device_t dev;
1091{
1092 u_char eaddr[ETHER_ADDR_LEN];
1093 u_int16_t as[3];
1094 struct rl_softc *sc;
1095 struct ifnet *ifp;
1096 struct rl_hwrev *hw_rev;
1097 int hwrev;
1098 u_int16_t re_did = 0;
1099 int unit, error = 0, rid, i;
1100
1101 sc = device_get_softc(dev);
1102 unit = device_get_unit(dev);
1103
1104 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1105 MTX_DEF | MTX_RECURSE);
1106#ifndef BURN_BRIDGES
1107 /*
1108 * Handle power management nonsense.
1109 */
1110
1111 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1112 u_int32_t iobase, membase, irq;
1113
1114 /* Save important PCI config data. */
1115 iobase = pci_read_config(dev, RL_PCI_LOIO, 4);
1116 membase = pci_read_config(dev, RL_PCI_LOMEM, 4);
1117 irq = pci_read_config(dev, RL_PCI_INTLINE, 4);
1118
1119 /* Reset the power state. */
1120 printf("re%d: chip is is in D%d power mode "
1121 "-- setting to D0\n", unit,
1122 pci_get_powerstate(dev));
1123
1124 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1125
1126 /* Restore PCI config data. */
1127 pci_write_config(dev, RL_PCI_LOIO, iobase, 4);
1128 pci_write_config(dev, RL_PCI_LOMEM, membase, 4);
1129 pci_write_config(dev, RL_PCI_INTLINE, irq, 4);
1130 }
1131#endif
1132 /*
1133 * Map control/status registers.
1134 */
1135 pci_enable_busmaster(dev);
1136
1137 rid = RL_RID;
1138 sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid,
1139 0, ~0, 1, RF_ACTIVE);
1140
1141 if (sc->rl_res == NULL) {
1142 printf ("re%d: couldn't map ports/memory\n", unit);
1143 error = ENXIO;
1144 goto fail;
1145 }
1146
1147 sc->rl_btag = rman_get_bustag(sc->rl_res);
1148 sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
1149
1150 /* Allocate interrupt */
1151 rid = 0;
1152 sc->rl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1153 RF_SHAREABLE | RF_ACTIVE);
1154
1155 if (sc->rl_irq == NULL) {
1156 printf("re%d: couldn't map interrupt\n", unit);
1157 error = ENXIO;
1158 goto fail;
1159 }
1160
1161 /* Reset the adapter. */
1162 re_reset(sc);
1163
1164 hw_rev = re_hwrevs;
1165 hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
1166 while (hw_rev->rl_desc != NULL) {
1167 if (hw_rev->rl_rev == hwrev) {
1168 sc->rl_type = hw_rev->rl_type;
1169 break;
1170 }
1171 hw_rev++;
1172 }
1173
1174 if (sc->rl_type == RL_8169) {
1175
1176 /* Set RX length mask */
1177
1178 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
1179
1180 /* Force station address autoload from the EEPROM */
1181
1182 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_AUTOLOAD);
1183 for (i = 0; i < RL_TIMEOUT; i++) {
1184 if (!(CSR_READ_1(sc, RL_EECMD) & RL_EEMODE_AUTOLOAD))
1185 break;
1186 DELAY(100);
1187 }
1188 if (i == RL_TIMEOUT)
1189 printf ("re%d: eeprom autoload timed out\n", unit);
1190
1191 for (i = 0; i < ETHER_ADDR_LEN; i++)
1192 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
1193 } else {
1194
1195 /* Set RX length mask */
1196
1197 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
1198
1199 sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
1200 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1, 0);
1201 if (re_did != 0x8129)
1202 sc->rl_eecmd_read = RL_EECMD_READ_8BIT;
1203
1204 /*
1205 * Get station address from the EEPROM.
1206 */
1207 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3, 0);
1208 for (i = 0; i < 3; i++) {
1209 eaddr[(i * 2) + 0] = as[i] & 0xff;
1210 eaddr[(i * 2) + 1] = as[i] >> 8;
1211 }
1212 }
1213
1214 /*
1215 * A RealTek chip was detected. Inform the world.
1216 */
1217 printf("re%d: Ethernet address: %6D\n", unit, eaddr, ":");
1218
1219 sc->rl_unit = unit;
1220 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1221
1222 /*
1223 * Allocate the parent bus DMA tag appropriate for PCI.
1224 */
1225#define RL_NSEG_NEW 32
1226 error = bus_dma_tag_create(NULL, /* parent */
1227 1, 0, /* alignment, boundary */
1228 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1229 BUS_SPACE_MAXADDR, /* highaddr */
1230 NULL, NULL, /* filter, filterarg */
1231 MAXBSIZE, RL_NSEG_NEW, /* maxsize, nsegments */
1232 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1233 BUS_DMA_ALLOCNOW, /* flags */
1234 NULL, NULL, /* lockfunc, lockarg */
1235 &sc->rl_parent_tag);
1236 if (error)
1237 goto fail;
1238
1239 error = re_allocmem(dev, sc);
1240
1241 if (error)
1242 goto fail;
1243
1244 /* Do MII setup */
1245 if (mii_phy_probe(dev, &sc->rl_miibus,
1246 re_ifmedia_upd, re_ifmedia_sts)) {
1247 printf("re%d: MII without any phy!\n", sc->rl_unit);
1248 error = ENXIO;
1249 goto fail;
1250 }
1251
1252 ifp = &sc->arpcom.ac_if;
1253 ifp->if_softc = sc;
1254 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1255 ifp->if_mtu = ETHERMTU;
1256 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1257 ifp->if_ioctl = re_ioctl;
1258 ifp->if_output = ether_output;
1259 ifp->if_capabilities = IFCAP_VLAN_MTU;
1260 ifp->if_start = re_start;
1261 ifp->if_hwassist = RE_CSUM_FEATURES;
1262 ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
1263 ifp->if_watchdog = re_watchdog;
1264 ifp->if_init = re_init;
1265 if (sc->rl_type == RL_8169)
1266 ifp->if_baudrate = 1000000000;
1267 else
1268 ifp->if_baudrate = 100000000;
1269 ifp->if_snd.ifq_maxlen = RL_IFQ_MAXLEN;
1270 ifp->if_capenable = ifp->if_capabilities;
1271
1272 callout_handle_init(&sc->rl_stat_ch);
1273
1274 /*
1275 * Call MI attach routine.
1276 */
1277 ether_ifattach(ifp, eaddr);
1278
1279 /* Perform hardware diagnostic. */
1280 error = re_diag(sc);
1281
1282 if (error) {
1283 printf("re%d: attach aborted due to hardware diag failure\n",
1284 unit);
1285 ether_ifdetach(ifp);
1286 goto fail;
1287 }
1288
1289 /* Hook interrupt last to avoid having to lock softc */
1290 error = bus_setup_intr(dev, sc->rl_irq, INTR_TYPE_NET,
1291 re_intr, sc, &sc->rl_intrhand);
1292
1293 if (error) {
1294 printf("re%d: couldn't set up irq\n", unit);
1295 ether_ifdetach(ifp);
1296 goto fail;
1297 }
1298
1299fail:
1300 if (error)
1301 re_detach(dev);
1302
1303 return (error);
1304}
1305
1306/*
1307 * Shutdown hardware and free up resources. This can be called any
1308 * time after the mutex has been initialized. It is called in both
1309 * the error case in attach and the normal detach case so it needs
1310 * to be careful about only freeing resources that have actually been
1311 * allocated.
1312 */
1313static int
1314re_detach(dev)
1315 device_t dev;
1316{
1317 struct rl_softc *sc;
1318 struct ifnet *ifp;
1319 int i;
1320
1321 sc = device_get_softc(dev);
1322 KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));
1323 RL_LOCK(sc);
1324 ifp = &sc->arpcom.ac_if;
1325
1326 /* These should only be active if attach succeeded */
1327 if (device_is_attached(dev)) {
1328 re_stop(sc);
1329 /*
1330 * Force off the IFF_UP flag here, in case someone
1331 * still had a BPF descriptor attached to this
1332 * interface. If they do, ether_ifattach() will cause
1333 * the BPF code to try and clear the promisc mode
1334 * flag, which will bubble down to re_ioctl(),
1335 * which will try to call re_init() again. This will
1336 * turn the NIC back on and restart the MII ticker,
1337 * which will panic the system when the kernel tries
1338 * to invoke the re_tick() function that isn't there
1339 * anymore.
1340 */
1341 ifp->if_flags &= ~IFF_UP;
1342 ether_ifdetach(ifp);
1343 }
1344 if (sc->rl_miibus)
1345 device_delete_child(dev, sc->rl_miibus);
1346 bus_generic_detach(dev);
1347
1348 if (sc->rl_intrhand)
1349 bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand);
1350 if (sc->rl_irq)
1351 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq);
1352 if (sc->rl_res)
1353 bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res);
1354
1355
1356 /* Unload and free the RX DMA ring memory and map */
1357
1358 if (sc->rl_ldata.rl_rx_list_tag) {
1359 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
1360 sc->rl_ldata.rl_rx_list_map);
1361 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
1362 sc->rl_ldata.rl_rx_list,
1363 sc->rl_ldata.rl_rx_list_map);
1364 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
1365 }
1366
1367 /* Unload and free the TX DMA ring memory and map */
1368
1369 if (sc->rl_ldata.rl_tx_list_tag) {
1370 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
1371 sc->rl_ldata.rl_tx_list_map);
1372 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
1373 sc->rl_ldata.rl_tx_list,
1374 sc->rl_ldata.rl_tx_list_map);
1375 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
1376 }
1377
1378 /* Destroy all the RX and TX buffer maps */
1379
1380 if (sc->rl_ldata.rl_mtag) {
1381 for (i = 0; i < RL_TX_DESC_CNT; i++)
1382 bus_dmamap_destroy(sc->rl_ldata.rl_mtag,
1383 sc->rl_ldata.rl_tx_dmamap[i]);
1384 for (i = 0; i < RL_RX_DESC_CNT; i++)
1385 bus_dmamap_destroy(sc->rl_ldata.rl_mtag,
1386 sc->rl_ldata.rl_rx_dmamap[i]);
1387 bus_dma_tag_destroy(sc->rl_ldata.rl_mtag);
1388 }
1389
1390 /* Unload and free the stats buffer and map */
1391
1392 if (sc->rl_ldata.rl_stag) {
1393 bus_dmamap_unload(sc->rl_ldata.rl_stag,
1394 sc->rl_ldata.rl_rx_list_map);
1395 bus_dmamem_free(sc->rl_ldata.rl_stag,
1396 sc->rl_ldata.rl_stats,
1397 sc->rl_ldata.rl_smap);
1398 bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
1399 }
1400
1401 if (sc->rl_parent_tag)
1402 bus_dma_tag_destroy(sc->rl_parent_tag);
1403
1404 RL_UNLOCK(sc);
1405 mtx_destroy(&sc->rl_mtx);
1406
1407 return(0);
1408}
1409
static int
re_newbuf(sc, idx, m)
	struct rl_softc *sc;
	int idx;
	struct mbuf *m;
{
	struct rl_dmaload_arg arg;
	struct mbuf *n = NULL;
	int error;

	/*
	 * If no mbuf was supplied, allocate a fresh packet-header mbuf
	 * with a cluster attached; otherwise recycle the caller's mbuf
	 * by resetting its data pointer to the start of its cluster.
	 * (Assumes a recycled mbuf always has external cluster storage
	 * -- true for buffers allocated here via m_getcl().)
	 */
	if (m == NULL) {
		n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (n == NULL)
			return(ENOBUFS);
		m = n;
	} else
		m->m_data = m->m_ext.ext_buf;

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	/* Ask re_dma_map_desc() to fill exactly one RX descriptor. */
	arg.sc = sc;
	arg.rl_idx = idx;
	arg.rl_maxsegs = 1;
	arg.rl_flags = 0;
	arg.rl_ring = sc->rl_ldata.rl_rx_list;

	error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag,
	    sc->rl_ldata.rl_rx_dmamap[idx], m, re_dma_map_desc,
	    &arg, BUS_DMA_NOWAIT);
	/* rl_maxsegs != 1 is the callback's out-of-band failure signal. */
	if (error || arg.rl_maxsegs != 1) {
		if (n != NULL)
			m_freem(n);
		return (ENOMEM);
	}

	/* Hand the descriptor to the chip and remember the mbuf. */
	sc->rl_ldata.rl_rx_list[idx].rl_cmdstat |= htole32(RL_RDESC_CMD_OWN);
	sc->rl_ldata.rl_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->rl_ldata.rl_mtag,
	    sc->rl_ldata.rl_rx_dmamap[idx],
	    BUS_DMASYNC_PREREAD);

	return(0);
}
1460
1461static int
1462re_tx_list_init(sc)
1463 struct rl_softc *sc;
1464{
1465 bzero ((char *)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ);
1466 bzero ((char *)&sc->rl_ldata.rl_tx_mbuf,
1467 (RL_TX_DESC_CNT * sizeof(struct mbuf *)));
1468
1469 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
1470 sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE);
1471 sc->rl_ldata.rl_tx_prodidx = 0;
1472 sc->rl_ldata.rl_tx_considx = 0;
1473 sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT;
1474
1475 return(0);
1476}
1477
1478static int
1479re_rx_list_init(sc)
1480 struct rl_softc *sc;
1481{
1482 int i;
1483
1484 bzero ((char *)sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ);
1485 bzero ((char *)&sc->rl_ldata.rl_rx_mbuf,
1486 (RL_RX_DESC_CNT * sizeof(struct mbuf *)));
1487
1488 for (i = 0; i < RL_RX_DESC_CNT; i++) {
1489 if (re_newbuf(sc, i, NULL) == ENOBUFS)
1490 return(ENOBUFS);
1491 }
1492
1493 /* Flush the RX descriptors */
1494
1495 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
1496 sc->rl_ldata.rl_rx_list_map,
1497 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1498
1499 sc->rl_ldata.rl_rx_prodidx = 0;
1500 sc->rl_head = sc->rl_tail = NULL;
1501
1502 return(0);
1503}
1504
1505/*
1506 * RX handler for C+ and 8169. For the gigE chips, we support
1507 * the reception of jumbo frames that have been fragmented
1508 * across multiple 2K mbuf cluster buffers.
1509 */
static void
re_rxeof(sc)
	struct rl_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	int i, total_len;
	struct rl_desc *cur_rx;
	u_int32_t rxstat, rxvlan;

	RL_LOCK_ASSERT(sc);

	ifp = &sc->arpcom.ac_if;
	i = sc->rl_ldata.rl_rx_prodidx;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD);

	/* Process descriptors until we hit one still owned by the chip. */
	while (!RL_OWN(&sc->rl_ldata.rl_rx_list[i])) {

		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		m = sc->rl_ldata.rl_rx_mbuf[i];
		total_len = RL_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->rl_cmdstat);
		rxvlan = le32toh(cur_rx->rl_vlanctl);

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->rl_ldata.rl_mtag,
		    sc->rl_ldata.rl_rx_dmamap[i],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rl_ldata.rl_mtag,
		    sc->rl_ldata.rl_rx_dmamap[i]);

		/*
		 * No EOF bit: this buffer holds a middle fragment of a
		 * frame that spans multiple clusters (jumbo frames on the
		 * gigE chips).  Chain it onto the pending packet and keep
		 * collecting fragments.
		 */
		if (!(rxstat & RL_RDESC_STAT_EOF)) {
			m->m_len = MCLBYTES - ETHER_ALIGN;
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			re_newbuf(sc, i, NULL);
			RL_DESC_INC(i);
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, RealTek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->rl_type == RL_8169)
			rxstat >>= 1;

		/* Receive error: drop the frame (and any queued fragments)
		 * but recycle the mbuf back into the ring. */
		if (rxstat & RL_RDESC_STAT_RXERRSUM) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			re_newbuf(sc, i, m);
			RL_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (re_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			re_newbuf(sc, i, m);
			RL_DESC_INC(i);
			continue;
		}

		RL_DESC_INC(i);

		/* Finalize the frame: either the tail of a fragment chain
		 * or a single-buffer packet.  Strip the trailing CRC. */
		if (sc->rl_head != NULL) {
			m->m_len = total_len % (MCLBYTES - ETHER_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rl_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
			}
			m = sc->rl_head;
			sc->rl_head = sc->rl_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */

		if (ifp->if_capenable & IFCAP_RXCSUM) {

			/* Check IP header checksum */
			if (rxstat & RL_RDESC_STAT_PROTOID)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if ((RL_TCPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    (RL_UDPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if (rxvlan & RL_RDESC_VLANCTL_TAG)
			VLAN_INPUT_TAG(ifp, m,
			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA)), continue);
		/* Drop the lock across the upcall into the network stack. */
		RL_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		RL_LOCK(sc);
	}

	/* Flush the RX DMA ring */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = i;

	return;
}
1679
static void
re_txeof(sc)
	struct rl_softc *sc;
{
	struct ifnet *ifp;
	u_int32_t txstat;
	int idx;

	ifp = &sc->arpcom.ac_if;
	idx = sc->rl_ldata.rl_tx_considx;

	/* Invalidate the TX descriptor list */

	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD);

	/* Reap completed descriptors from the consumer index up to the
	 * producer index, stopping at the first one still owned by the
	 * chip. */
	while (idx != sc->rl_ldata.rl_tx_prodidx) {

		txstat = le32toh(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat);
		if (txstat & RL_TDESC_CMD_OWN)
			break;

		/*
		 * We only stash mbufs in the last descriptor
		 * in a fragment chain, which also happens to
		 * be the only place where the TX status bits
		 * are valid.
		 */

		if (txstat & RL_TDESC_CMD_EOF) {
			m_freem(sc->rl_ldata.rl_tx_mbuf[idx]);
			sc->rl_ldata.rl_tx_mbuf[idx] = NULL;
			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
			    sc->rl_ldata.rl_tx_dmamap[idx]);
			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
			    RL_TDESC_STAT_COLCNT))
				ifp->if_collisions++;
			if (txstat & RL_TDESC_STAT_TXERRSUM)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
		}
		sc->rl_ldata.rl_tx_free++;
		RL_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */

	/* If we reaped anything, clear the watchdog and OACTIVE state. */
	if (idx != sc->rl_ldata.rl_tx_considx) {
		sc->rl_ldata.rl_tx_considx = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->rl_ldata.rl_tx_free != RL_TX_DESC_CNT)
		CSR_WRITE_4(sc, RL_TIMERCNT, 1);

	return;
}
1746
1747static void
1748re_tick(xsc)
1749 void *xsc;
1750{
1751 struct rl_softc *sc;
1752 struct mii_data *mii;
1753
1754 sc = xsc;
1755 RL_LOCK(sc);
1756 mii = device_get_softc(sc->rl_miibus);
1757
1758 mii_tick(mii);
1759
1760 sc->rl_stat_ch = timeout(re_tick, sc, hz);
1761 RL_UNLOCK(sc);
1762
1763 return;
1764}
1765
1766#ifdef DEVICE_POLLING
static void
re_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;

	RL_LOCK(sc);
	if (cmd == POLL_DEREGISTER) {	/* final call, enable interrupts */
		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
		goto done;
	}

	/* Budget for this poll pass, then service RX and TX rings. */
	sc->rxcycles = count;
	re_rxeof(sc);
	re_txeof(sc);

	/* Kick the transmitter if packets are queued. */
	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int16_t       status;

		status = CSR_READ_2(sc, RL_ISR);
		/* 0xffff means the card has been removed. */
		if (status == 0xffff)
			goto done;
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & RL_ISR_SYSTEM_ERR) {
			re_reset(sc);
			re_init(sc);
		}
	}
done:
	RL_UNLOCK(sc);
}
1806#endif /* DEVICE_POLLING */
1807
static void
re_intr(arg)
	void *arg;
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;

	sc = arg;

	/* Ignore stray interrupts while the device is suspended. */
	if (sc->suspended) {
		return;
	}

	RL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	if (!(ifp->if_flags & IFF_UP)) {
		RL_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if (ether_poll_register(re_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_2(sc, RL_IMR, 0x0000);
		re_poll(ifp, 0, 1);
		goto done;
	}
#endif /* DEVICE_POLLING */

	/* Service and acknowledge interrupt causes until none remain. */
	for (;;) {

		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xffff)
			break;
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);

		if ((status & RL_INTRS_CPLUS) == 0)
			break;

		if (status & RL_ISR_RX_OK)
			re_rxeof(sc);

		if (status & RL_ISR_RX_ERR)
			re_rxeof(sc);

		if ((status & RL_ISR_TIMEOUT_EXPIRED) ||
		    (status & RL_ISR_TX_ERR) ||
		    (status & RL_ISR_TX_DESC_UNAVAIL))
			re_txeof(sc);

		/* Fatal chip error: full reset and reinitialization. */
		if (status & RL_ISR_SYSTEM_ERR) {
			re_reset(sc);
			re_init(sc);
		}

		/* Link change: restart the MII tick immediately. */
		if (status & RL_ISR_LINKCHG) {
			untimeout(re_tick, sc, sc->rl_stat_ch);
			re_tick(sc);
		}
	}

	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

#ifdef DEVICE_POLLING
done:
#endif
	RL_UNLOCK(sc);

	return;
}
1884
static int
re_encap(sc, m_head, idx)
	struct rl_softc *sc;
	struct mbuf *m_head;
	int *idx;
{
	struct mbuf *m_new = NULL;
	struct rl_dmaload_arg	arg;
	bus_dmamap_t		map;
	int error;
	struct m_tag *mtag;

	/* Keep a few descriptors in reserve so the ring never fills
	 * completely (a full ring is indistinguishable from empty). */
	if (sc->rl_ldata.rl_tx_free <= 4)
		return(EFBIG);

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. (This is according to testing done with an 8169
	 * chip. I'm not sure if this is a requirement or a bug.)
	 */

	arg.rl_flags = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		arg.rl_flags |= RL_TDESC_CMD_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
		arg.rl_flags |= RL_TDESC_CMD_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
		arg.rl_flags |= RL_TDESC_CMD_UDPCSUM;

	arg.sc = sc;
	arg.rl_idx = *idx;
	arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
	if (arg.rl_maxsegs > 4)
		arg.rl_maxsegs -= 4;
	arg.rl_ring = sc->rl_ldata.rl_tx_list;

	map = sc->rl_ldata.rl_tx_dmamap[*idx];
	error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
	    m_head, re_dma_map_desc, &arg, BUS_DMA_NOWAIT);

	if (error && error != EFBIG) {
		printf("re%d: can't map mbuf (error %d)\n", sc->rl_unit, error);
		return(ENOBUFS);
	}

	/* Too many segments to map, coalesce into a single mbuf */

	if (error || arg.rl_maxsegs == 0) {
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return(1);
		else
			m_head = m_new;

		/*
		 * NOTE(review): on this retry path rl_maxsegs is set to
		 * the full rl_tx_free without the 4-descriptor reserve
		 * applied above, and if the second load fails the
		 * defragmented m_head is returned to the caller, which
		 * prepends it to the send queue -- verify neither leaks
		 * nor ring exhaustion can result.
		 */
		arg.sc = sc;
		arg.rl_idx = *idx;
		arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
		arg.rl_ring = sc->rl_ldata.rl_tx_list;

		error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
		    m_head, re_dma_map_desc, &arg, BUS_DMA_NOWAIT);
		if (error) {
			printf("re%d: can't map mbuf (error %d)\n",
			    sc->rl_unit, error);
			return(EFBIG);
		}
	}

	/*
	 * Insure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain.
	 */
	sc->rl_ldata.rl_tx_dmamap[*idx] =
	    sc->rl_ldata.rl_tx_dmamap[arg.rl_idx];
	sc->rl_ldata.rl_tx_dmamap[arg.rl_idx] = map;

	sc->rl_ldata.rl_tx_mbuf[arg.rl_idx] = m_head;
	sc->rl_ldata.rl_tx_free -= arg.rl_maxsegs;

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in the first descriptor of a multi-descriptor
	 * transmission attempt.
	 */

	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
	if (mtag != NULL)
		sc->rl_ldata.rl_tx_list[*idx].rl_vlanctl =
		    htole32(htons(VLAN_TAG_VALUE(mtag)) | RL_TDESC_VLANCTL_TAG);

	/* Transfer ownership of packet to the chip. */

	sc->rl_ldata.rl_tx_list[arg.rl_idx].rl_cmdstat |=
	    htole32(RL_TDESC_CMD_OWN);
	if (*idx != arg.rl_idx)
		sc->rl_ldata.rl_tx_list[*idx].rl_cmdstat |=
		    htole32(RL_TDESC_CMD_OWN);

	/* Advance the producer index past this chain for the caller. */
	RL_DESC_INC(arg.rl_idx);
	*idx = arg.rl_idx;

	return(0);
}
1991
1992/*
1993 * Main transmit routine for C+ and gigE NICs.
1994 */
1995
static void
re_start(ifp)
	struct ifnet *ifp;
{
	struct rl_softc *sc;
	struct mbuf *m_head = NULL;
	int idx;

	/*
	 * Dequeue packets from the send queue and map them into the
	 * TX descriptor ring until the ring is full or the queue is
	 * empty.  Called with the send queue non-empty; ifp is the
	 * interface whose softc owns the ring.
	 */
	sc = ifp->if_softc;
	RL_LOCK(sc);

	/* Producer index: first ring slot not yet owned by an mbuf. */
	idx = sc->rl_ldata.rl_tx_prodidx;

	while (sc->rl_ldata.rl_tx_mbuf[idx] == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * On encap failure (ring too full), put the packet
		 * back at the head of the queue and mark the
		 * interface as busy so upper layers stop sending.
		 */
		if (re_encap(sc, m_head, &idx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	/* Flush the TX descriptors */

	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_tx_prodidx = idx;

	/*
	 * RealTek put the TX poll request register in a different
	 * location on the 8169 gigE chip. I don't know why.
	 */

	if (sc->rl_type == RL_8169)
		CSR_WRITE_2(sc, RL_GTXSTART, RL_TXSTART_START);
	else
		CSR_WRITE_2(sc, RL_TXSTART, RL_TXSTART_START);

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the TIMERINT register, and then trigger an
	 * interrupt. Each time we write to the TIMERCNT register,
	 * the timer count is reset to 0.
	 */
	CSR_WRITE_4(sc, RL_TIMERCNT, 1);

	RL_UNLOCK(sc);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 * NOTE(review): this is armed even when nothing was queued
	 * this call; re_txeof presumably clears it — confirm.
	 */
	ifp->if_timer = 5;

	return;
}
2064
/*
 * Bring the interface up: reset the chip, program C+ mode, the MAC
 * address, descriptor rings, RX filter, and interrupt moderation,
 * then start the receiver/transmitter.  The order of register writes
 * below is deliberate (C+ command register first, per the comment).
 * When sc->rl_testmode is set, the chip is left in loopback with
 * interrupts masked for re_diag() and we return before touching the
 * media or the tick callout.
 */
static void
re_init(xsc)
	void *xsc;
{
	struct rl_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	u_int32_t rxcfg = 0;

	RL_LOCK(sc);
	mii = device_get_softc(sc->rl_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	re_stop(sc);

	/*
	 * Enable C+ RX and TX mode, as well as VLAN stripping and
	 * RX checksum offload. We must configure the C+ register
	 * before all others.
	 */
	CSR_WRITE_2(sc, RL_CPLUS_CMD, RL_CPLUSCMD_RXENB|
	    RL_CPLUSCMD_TXENB|RL_CPLUSCMD_PCI_MRW|
	    RL_CPLUSCMD_VLANSTRIP|
	    (ifp->if_capenable & IFCAP_RXCSUM ?
	    RL_CPLUSCMD_RXCSUM_ENB : 0));

	/*
	 * Init our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_STREAM_4(sc, RL_IDR0,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_STREAM_4(sc, RL_IDR4,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/*
	 * For C+ mode, initialize the RX descriptors and mbufs.
	 */
	re_rx_list_init(sc);
	re_tx_list_init(sc);

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.
	 * In test mode, enable digital loopback (the loopback bit
	 * differs between the 8169 and the 8139C+).
	 */
	if (sc->rl_testmode) {
		if (sc->rl_type == RL_8169)
			CSR_WRITE_4(sc, RL_TXCFG,
			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
		else
			CSR_WRITE_4(sc, RL_TXCFG,
			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
	} else
		CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);

	/* Set the individual bit to receive frames for this host only. */
	rxcfg = CSR_READ_4(sc, RL_RXCFG);
	rxcfg |= RL_RXCFG_RX_INDIV;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		rxcfg |= RL_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RL_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		rxcfg |= RL_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RL_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	re_setmulti(sc);

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else	/* otherwise ... */
#endif /* DEVICE_POLLING */
	/*
	 * Enable interrupts.
	 */
	if (sc->rl_testmode)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else
		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);

	/* Set initial TX threshold */
	sc->rl_txthresh = RL_TX_THRESH_INIT;

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
#ifdef notdef
	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
#endif
	/*
	 * Load the addresses of the RX and TX lists into the chip.
	 */

	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));

	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));

	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);

	/*
	 * Initialize the timer interrupt register so that
	 * a timer interrupt will be generated once the timer
	 * reaches a certain number of ticks. The timer is
	 * reloaded on each transmit. This gives us TX interrupt
	 * moderation, which dramatically improves TX frame rate.
	 */

	if (sc->rl_type == RL_8169)
		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
	else
		CSR_WRITE_4(sc, RL_TIMERINT, 0x400);

	/*
	 * For 8169 gigE NICs, set the max allowed RX packet
	 * size so we can receive jumbo frames.
	 */
	if (sc->rl_type == RL_8169)
		CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);

	/* In diagnostic mode, leave the chip quiescent for re_diag(). */
	if (sc->rl_testmode) {
		RL_UNLOCK(sc);
		return;
	}

	mii_mediachg(mii);

	CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Arm the one-second statistics/link tick. */
	sc->rl_stat_ch = timeout(re_tick, sc, hz);
	RL_UNLOCK(sc);

	return;
}
2237
2238/*
2239 * Set media options.
2240 */
2241static int
2242re_ifmedia_upd(ifp)
2243 struct ifnet *ifp;
2244{
2245 struct rl_softc *sc;
2246 struct mii_data *mii;
2247
2248 sc = ifp->if_softc;
2249 mii = device_get_softc(sc->rl_miibus);
2250 mii_mediachg(mii);
2251
2252 return(0);
2253}
2254
2255/*
2256 * Report current media status.
2257 */
2258static void
2259re_ifmedia_sts(ifp, ifmr)
2260 struct ifnet *ifp;
2261 struct ifmediareq *ifmr;
2262{
2263 struct rl_softc *sc;
2264 struct mii_data *mii;
2265
2266 sc = ifp->if_softc;
2267 mii = device_get_softc(sc->rl_miibus);
2268
2269 mii_pollstat(mii);
2270 ifmr->ifm_active = mii->mii_media_active;
2271 ifmr->ifm_status = mii->mii_media_status;
2272
2273 return;
2274}
2275
2276static int
2277re_ioctl(ifp, command, data)
2278 struct ifnet *ifp;
2279 u_long command;
2280 caddr_t data;
2281{
2282 struct rl_softc *sc = ifp->if_softc;
2283 struct ifreq *ifr = (struct ifreq *) data;
2284 struct mii_data *mii;
2285 int error = 0;
2286
2287 RL_LOCK(sc);
2288
2289 switch(command) {
2290 case SIOCSIFMTU:
2291 if (ifr->ifr_mtu > RL_JUMBO_MTU)
2292 error = EINVAL;
2293 ifp->if_mtu = ifr->ifr_mtu;
2294 break;
2295 case SIOCSIFFLAGS:
2296 if (ifp->if_flags & IFF_UP) {
2297 re_init(sc);
2298 } else {
2299 if (ifp->if_flags & IFF_RUNNING)
2300 re_stop(sc);
2301 }
2302 error = 0;
2303 break;
2304 case SIOCADDMULTI:
2305 case SIOCDELMULTI:
2306 re_setmulti(sc);
2307 error = 0;
2308 break;
2309 case SIOCGIFMEDIA:
2310 case SIOCSIFMEDIA:
2311 mii = device_get_softc(sc->rl_miibus);
2312 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2313 break;
2314 case SIOCSIFCAP:
2315 ifp->if_capenable = ifr->ifr_reqcap;
2316 if (ifp->if_capenable & IFCAP_TXCSUM)
2317 ifp->if_hwassist = RE_CSUM_FEATURES;
2318 else
2319 ifp->if_hwassist = 0;
2320 if (ifp->if_flags & IFF_RUNNING)
2321 re_init(sc);
2322 break;
2323 default:
2324 error = ether_ioctl(ifp, command, data);
2325 break;
2326 }
2327
2328 RL_UNLOCK(sc);
2329
2330 return(error);
2331}
2332
2333static void
2334re_watchdog(ifp)
2335 struct ifnet *ifp;
2336{
2337 struct rl_softc *sc;
2338
2339 sc = ifp->if_softc;
2340 RL_LOCK(sc);
2341 printf("re%d: watchdog timeout\n", sc->rl_unit);
2342 ifp->if_oerrors++;
2343
2344 re_txeof(sc);
2345 re_rxeof(sc);
2346
2347 re_init(sc);
2348
2349 RL_UNLOCK(sc);
2350
2351 return;
2352}
2353
2354/*
2355 * Stop the adapter and free any mbufs allocated to the
2356 * RX and TX lists.
2357 */
2358static void
2359re_stop(sc)
2360 struct rl_softc *sc;
2361{
2362 register int i;
2363 struct ifnet *ifp;
2364
2365 RL_LOCK(sc);
2366 ifp = &sc->arpcom.ac_if;
2367 ifp->if_timer = 0;
2368
2369 untimeout(re_tick, sc, sc->rl_stat_ch);
2370 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2371#ifdef DEVICE_POLLING
2372 ether_poll_deregister(ifp);
2373#endif /* DEVICE_POLLING */
2374
2375 CSR_WRITE_1(sc, RL_COMMAND, 0x00);
2376 CSR_WRITE_2(sc, RL_IMR, 0x0000);
2377
2378 if (sc->rl_head != NULL) {
2379 m_freem(sc->rl_head);
2380 sc->rl_head = sc->rl_tail = NULL;
2381 }
2382
2383 /* Free the TX list buffers. */
2384
2385 for (i = 0; i < RL_TX_DESC_CNT; i++) {
2386 if (sc->rl_ldata.rl_tx_mbuf[i] != NULL) {
2387 bus_dmamap_unload(sc->rl_ldata.rl_mtag,
2388 sc->rl_ldata.rl_tx_dmamap[i]);
2389 m_freem(sc->rl_ldata.rl_tx_mbuf[i]);
2390 sc->rl_ldata.rl_tx_mbuf[i] = NULL;
2391 }
2392 }
2393
2394 /* Free the RX list buffers. */
2395
2396 for (i = 0; i < RL_RX_DESC_CNT; i++) {
2397 if (sc->rl_ldata.rl_rx_mbuf[i] != NULL) {
2398 bus_dmamap_unload(sc->rl_ldata.rl_mtag,
2399 sc->rl_ldata.rl_rx_dmamap[i]);
2400 m_freem(sc->rl_ldata.rl_rx_mbuf[i]);
2401 sc->rl_ldata.rl_rx_mbuf[i] = NULL;
2402 }
2403 }
2404
2405 RL_UNLOCK(sc);
2406 return;
2407}
2408
2409/*
2410 * Device suspend routine. Stop the interface and save some PCI
2411 * settings in case the BIOS doesn't restore them properly on
2412 * resume.
2413 */
2414static int
2415re_suspend(dev)
2416 device_t dev;
2417{
2418 register int i;
2419 struct rl_softc *sc;
2420
2421 sc = device_get_softc(dev);
2422
2423 re_stop(sc);
2424
2425 for (i = 0; i < 5; i++)
2426 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
2427 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
2428 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
2429 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
2430 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
2431
2432 sc->suspended = 1;
2433
2434 return (0);
2435}
2436
2437/*
2438 * Device resume routine. Restore some PCI settings in case the BIOS
2439 * doesn't, re-enable busmastering, and restart the interface if
2440 * appropriate.
2441 */
2442static int
2443re_resume(dev)
2444 device_t dev;
2445{
2446 register int i;
2447 struct rl_softc *sc;
2448 struct ifnet *ifp;
2449
2450 sc = device_get_softc(dev);
2451 ifp = &sc->arpcom.ac_if;
2452
2453 /* better way to do this? */
2454 for (i = 0; i < 5; i++)
2455 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
2456 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
2457 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
2458 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
2459 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
2460
2461 /* reenable busmastering */
2462 pci_enable_busmaster(dev);
2463 pci_enable_io(dev, RL_RES);
2464
2465 /* reinitialize interface if necessary */
2466 if (ifp->if_flags & IFF_UP)
2467 re_init(sc);
2468
2469 sc->suspended = 0;
2470
2471 return (0);
2472}
2473
2474/*
2475 * Stop all chip I/O so that the kernel's probe routines don't
2476 * get confused by errant DMAs when rebooting.
2477 */
2478static void
2479re_shutdown(dev)
2480 device_t dev;
2481{
2482 struct rl_softc *sc;
2483
2484 sc = device_get_softc(dev);
2485
2486 re_stop(sc);
2487
2488 return;
2489}
/* Register miibus as a child of re so the PHY driver attaches. */
DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);

/*
 * Read-modify-write helpers for the EEPROM command register.
 * NOTE(review): 'x' is not parenthesized in the expansion; all current
 * callers pass plain bit constants, so this is safe as used.
 */
#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)
282
283/*
284 * Send a read command and address to the EEPROM, check for ACK.
285 */
static void
re_eeprom_putbyte(sc, addr)
	struct rl_softc *sc;
	int addr;
{
	register int d, i;

	/* Merge the word address with the chip's read opcode bits. */
	d = addr | sc->rl_eecmd_read;

	/*
	 * Feed in each bit and strobe the clock.
	 * Bits are shifted out MSB first, starting from bit 10
	 * (an 11-bit opcode+address word).  The DELAYs provide the
	 * setup/hold timing for the bit-banged serial EEPROM bus.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	return;
}
313
314/*
315 * Read a word of data stored in the EEPROM at address 'addr.'
316 */
static void
re_eeprom_getword(sc, addr, dest)
	struct rl_softc *sc;
	int addr;
	u_int16_t *dest;
{
	register int i;
	u_int16_t word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Start reading bits from EEPROM.
	 * 16 bits are clocked in MSB first; each clock pulse latches
	 * one bit on the data-out line.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	*dest = word;

	return;
}
355
356/*
357 * Read a sequence of words from the EEPROM.
358 */
359static void
360re_read_eeprom(sc, dest, off, cnt, swap)
361 struct rl_softc *sc;
362 caddr_t dest;
363 int off;
364 int cnt;
365 int swap;
366{
367 int i;
368 u_int16_t word = 0, *ptr;
369
370 for (i = 0; i < cnt; i++) {
371 re_eeprom_getword(sc, off + i, &word);
372 ptr = (u_int16_t *)(dest + (i * 2));
373 if (swap)
374 *ptr = ntohs(word);
375 else
376 *ptr = word;
377 }
378
379 return;
380}
381
/*
 * Read a PHY register on the 8169's internal GMII PHY via the PHYAR
 * indirect access register.  Returns the 16-bit register value, or 0
 * on timeout or for PHY addresses other than 1 (the 8169's internal
 * PHY answers only at address 1).
 */
static int
re_gmii_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct rl_softc *sc;
	u_int32_t rval;
	int i;

	if (phy != 1)
		return(0);

	sc = device_get_softc(dev);

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RL_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
		return(rval);
	}

	/* Start the read cycle: register number in bits 16-20. */
	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
	DELAY(1000);

	/*
	 * Poll until the BUSY flag is *set*.  Note the polarity is the
	 * opposite of re_gmii_writereg(): for reads, BUSY going high
	 * appears to signal that the data is valid.
	 */
	for (i = 0; i < RL_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(100);
	}

	if (i == RL_TIMEOUT) {
		printf ("re%d: PHY read failed\n", sc->rl_unit);
		return (0);
	}

	return (rval & RL_PHYAR_PHYDATA);
}
420
/*
 * Write a PHY register on the 8169's internal GMII PHY via the PHYAR
 * indirect access register.  Always returns 0 (the MII framework
 * ignores write status); logs on timeout.
 */
static int
re_gmii_writereg(dev, phy, reg, data)
	device_t dev;
	int phy, reg, data;
{
	struct rl_softc *sc;
	u_int32_t rval;
	int i;

	sc = device_get_softc(dev);

	/* Register in bits 16-20, data in the low 16, BUSY starts it. */
	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
	DELAY(1000);

	/* Poll until the chip clears BUSY, i.e. the write completed. */
	for (i = 0; i < RL_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(100);
	}

	if (i == RL_TIMEOUT) {
		printf ("re%d: PHY write failed\n", sc->rl_unit);
		return (0);
	}

	return (0);
}
450
/*
 * miibus read entry point.  On the 8169 this defers to the GMII
 * access routine; on the 8139C+ the MII registers are mapped to
 * chip-specific CSR locations, translated by the switch below.
 * Every exit path must drop the softc lock taken at entry.
 */
static int
re_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct rl_softc *sc;
	u_int16_t rval = 0;
	u_int16_t re8139_reg = 0;

	sc = device_get_softc(dev);
	RL_LOCK(sc);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_readreg(dev, phy, reg);
		RL_UNLOCK(sc);
		return (rval);
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		RL_UNLOCK(sc);
		return(0);
	}
	switch(reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		/* The 8139C+ has no PHY ID registers; report zeros. */
		RL_UNLOCK(sc);
		return(0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register. If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		RL_UNLOCK(sc);
		return(rval);
	default:
		printf("re%d: bad phy register\n", sc->rl_unit);
		RL_UNLOCK(sc);
		return(0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	RL_UNLOCK(sc);
	return(rval);
}
513
/*
 * miibus write entry point.  Mirrors re_miibus_readreg(): GMII path
 * for the 8169, CSR-mapped MII registers for the 8139C+.  Returns 0
 * in all cases; every exit path drops the softc lock taken at entry.
 */
static int
re_miibus_writereg(dev, phy, reg, data)
	device_t dev;
	int phy, reg, data;
{
	struct rl_softc *sc;
	u_int16_t re8139_reg = 0;
	int rval = 0;

	sc = device_get_softc(dev);
	RL_LOCK(sc);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_writereg(dev, phy, reg, data);
		RL_UNLOCK(sc);
		return (rval);
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		RL_UNLOCK(sc);
		return(0);
	}
	switch(reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		/* No PHY ID registers on the 8139C+; silently ignore. */
		RL_UNLOCK(sc);
		return(0);
		break;
	default:
		printf("re%d: bad phy register\n", sc->rl_unit);
		RL_UNLOCK(sc);
		return(0);
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	RL_UNLOCK(sc);
	return(0);
}
567
568static void
569re_miibus_statchg(dev)
570 device_t dev;
571{
572 return;
573}
574
575/*
576 * Calculate CRC of a multicast group address, return the upper 6 bits.
577 */
578static u_int32_t
579re_mchash(addr)
580 caddr_t addr;
581{
582 u_int32_t crc, carry;
583 int idx, bit;
584 u_int8_t data;
585
586 /* Compute CRC for the address value. */
587 crc = 0xFFFFFFFF; /* initial value */
588
589 for (idx = 0; idx < 6; idx++) {
590 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
591 carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
592 crc <<= 1;
593 if (carry)
594 crc = (crc ^ 0x04c11db6) | carry;
595 }
596 }
597
598 /* return the filter bit position */
599 return(crc >> 26);
600}
601
602/*
603 * Program the 64-bit multicast hash filter.
604 */
605static void
606re_setmulti(sc)
607 struct rl_softc *sc;
608{
609 struct ifnet *ifp;
610 int h = 0;
611 u_int32_t hashes[2] = { 0, 0 };
612 struct ifmultiaddr *ifma;
613 u_int32_t rxfilt;
614 int mcnt = 0;
615
616 ifp = &sc->arpcom.ac_if;
617
618 rxfilt = CSR_READ_4(sc, RL_RXCFG);
619
620 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
621 rxfilt |= RL_RXCFG_RX_MULTI;
622 CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
623 CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF);
624 CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF);
625 return;
626 }
627
628 /* first, zot all the existing hash bits */
629 CSR_WRITE_4(sc, RL_MAR0, 0);
630 CSR_WRITE_4(sc, RL_MAR4, 0);
631
632 /* now program new ones */
633 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
634 if (ifma->ifma_addr->sa_family != AF_LINK)
635 continue;
636 h = re_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
637 if (h < 32)
638 hashes[0] |= (1 << h);
639 else
640 hashes[1] |= (1 << (h - 32));
641 mcnt++;
642 }
643
644 if (mcnt)
645 rxfilt |= RL_RXCFG_RX_MULTI;
646 else
647 rxfilt &= ~RL_RXCFG_RX_MULTI;
648
649 CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
650 CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
651 CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
652
653 return;
654}
655
/*
 * Issue a software reset and busy-wait (up to RL_TIMEOUT polls) for
 * the chip to clear the reset bit.  Logs a warning on timeout but
 * continues regardless.
 */
static void
re_reset(sc)
	struct rl_softc *sc;
{
	register int i;

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		printf("re%d: reset never completed!\n", sc->rl_unit);

	/*
	 * NOTE(review): write to undocumented register 0x82 — purpose
	 * unknown; presumably a post-reset workaround. Confirm against
	 * vendor reference code before changing.
	 */
	CSR_WRITE_1(sc, 0x82, 1);

	return;
}
676
677/*
678 * The following routine is designed to test for a defect on some
679 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
680 * lines connected to the bus, however for a 32-bit only card, they
681 * should be pulled high. The result of this defect is that the
682 * NIC will not work right if you plug it into a 64-bit slot: DMA
683 * operations will be done with 64-bit transfers, which will fail
684 * because the 64-bit data lines aren't connected.
685 *
686 * There's no way to work around this (short of talking a soldering
687 * iron to the board), however we can detect it. The method we use
688 * here is to put the NIC into digital loopback mode, set the receiver
689 * to promiscuous mode, and then try to send a frame. We then compare
690 * the frame data we sent to what was received. If the data matches,
691 * then the NIC is working correctly, otherwise we know the user has
692 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
693 * slot. In the latter case, there's no way the NIC can work correctly,
694 * so we print out a message on the console and abort the device attach.
695 */
696
static int
re_diag(sc)
	struct rl_softc *sc;
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m0;
	struct ether_header *eh;
	struct rl_desc *cur_rx;
	u_int16_t status;
	u_int32_t rxstat;
	int total_len, i, error = 0;
	u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return(ENOBUFS);

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->rl_testmode = 1;
	re_init(sc);
	/* Stop, settle, and reinit once more before the test proper. */
	re_stop(sc);
	DELAY(100000);
	re_init(sc);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 * Note: IF_HANDOFF() ultimately calls re_start() for us.
	 */

	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
	IF_HANDOFF(&ifp->if_snd, m0, ifp);
	m0 = NULL;

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RL_ISR);
		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
			break;
		DELAY(10);
	}

	if (i == RL_TIMEOUT) {
		printf("re%d: diagnostic failed, failed to receive packet "
		    "in loopback mode\n", sc->rl_unit);
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->rl_ldata.rl_mtag,
	    sc->rl_ldata.rl_rx_dmamap[0],
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->rl_ldata.rl_mtag,
	    sc->rl_ldata.rl_rx_dmamap[0]);

	/* Take ownership of the received mbuf from slot 0. */
	m0 = sc->rl_ldata.rl_rx_mbuf[0];
	sc->rl_ldata.rl_rx_mbuf[0] = NULL;
	eh = mtod(m0, struct ether_header *);

	cur_rx = &sc->rl_ldata.rl_rx_list[0];
	total_len = RL_RXBYTES(cur_rx);
	/* NOTE(review): rxstat is read here but never checked. */
	rxstat = le32toh(cur_rx->rl_cmdstat);

	if (total_len != ETHER_MIN_LEN) {
		printf("re%d: diagnostic failed, received short packet\n",
		    sc->rl_unit);
		error = EIO;
		goto done;
	}

	/* Test that the received packet data matches what we sent. */

	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		printf("re%d: WARNING, DMA FAILURE!\n", sc->rl_unit);
		printf("re%d: expected TX data: %6D/%6D/0x%x\n", sc->rl_unit,
		    dst, ":", src, ":", ETHERTYPE_IP);
		printf("re%d: received RX data: %6D/%6D/0x%x\n", sc->rl_unit,
		    eh->ether_dhost, ":", eh->ether_shost, ":",
		    ntohs(eh->ether_type));
		printf("re%d: You may have a defective 32-bit NIC plugged "
		    "into a 64-bit PCI slot.\n", sc->rl_unit);
		printf("re%d: Please re-install the NIC in a 32-bit slot "
		    "for proper operation.\n", sc->rl_unit);
		printf("re%d: Read the re(4) man page for more details.\n",
		    sc->rl_unit);
		error = EIO;
	}

done:
	/* Turn interface off, release resources */

	sc->rl_testmode = 0;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(sc);
	if (m0 != NULL)
		m_freem(m0);

	return (error);
}
828
829/*
830 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
831 * IDs against our list and return a device name if we find a match.
832 */
static int
re_probe(dev)
	device_t dev;
{
	struct rl_type *t;
	struct rl_softc *sc;
	int rid;
	u_int32_t hwrev;

	t = re_devs;
	sc = device_get_softc(dev);

	/*
	 * Walk the supported-device table.  A vendor/device ID match
	 * alone is not enough: several table entries share IDs and
	 * differ only in the hardware revision field of TXCFG, so the
	 * registers must be mapped temporarily to read it.
	 */
	while(t->rl_name != NULL) {
		if ((pci_get_vendor(dev) == t->rl_vid) &&
		    (pci_get_device(dev) == t->rl_did)) {

			/*
			 * Temporarily map the I/O space
			 * so we can read the chip ID register.
			 */
			rid = RL_RID;
			sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid,
			    0, ~0, 1, RF_ACTIVE);
			if (sc->rl_res == NULL) {
				device_printf(dev,
				    "couldn't map ports/memory\n");
				return(ENXIO);
			}
			sc->rl_btag = rman_get_bustag(sc->rl_res);
			sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
			/*
			 * A transient mutex is created (and destroyed
			 * below) purely so the CSR_READ_4 can be done
			 * under the usual locking discipline.
			 */
			mtx_init(&sc->rl_mtx,
			    device_get_nameunit(dev),
			    MTX_NETWORK_LOCK, MTX_DEF);
			RL_LOCK(sc);
			hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
			bus_release_resource(dev, RL_RES,
			    RL_RID, sc->rl_res);
			RL_UNLOCK(sc);
			mtx_destroy(&sc->rl_mtx);
			/* Revision mismatch: keep scanning the table. */
			if (t->rl_basetype == hwrev) {
				device_set_desc(dev, t->rl_name);
				return(0);
			}
		}
		t++;
	}

	return(ENXIO);
}
882
883/*
884 * This routine takes the segment list provided as the result of
885 * a bus_dma_map_load() operation and assigns the addresses/lengths
886 * to RealTek DMA descriptors. This can be called either by the RX
887 * code or the TX code. In the RX case, we'll probably wind up mapping
888 * at most one segment. For the TX case, there could be any number of
889 * segments since TX packets may span multiple mbufs. In either case,
890 * if the number of segments is larger than the rl_maxsegs limit
891 * specified by the caller, we abort the mapping operation. Sadly,
892 * whoever designed the buffer mapping API did not provide a way to
893 * return an error from here, so we have to fake it a bit.
894 */
895
static void
re_dma_map_desc(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct rl_dmaload_arg *ctx;
	struct rl_desc *d = NULL;
	int i = 0, idx;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there's too many segments */
	if (nseg > ctx->rl_maxsegs) {
		ctx->rl_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it start transmission or
	 * reception.)
	 *
	 * NOTE(review): this callback serves both rings but uses
	 * RL_RX_DESC_CNT for the end-of-ring test and RL_TDESC_* flag
	 * names throughout — correct only while the TX and RX rings
	 * have the same size and descriptor layout; confirm the
	 * RL_*_DESC_CNT defines before changing either.
	 */
	idx = ctx->rl_idx;
	while(1) {
		u_int32_t cmdstat;
		d = &ctx->rl_ring[idx];
		/* Refuse to overwrite a descriptor the chip still owns. */
		if (le32toh(d->rl_cmdstat) & RL_RDESC_STAT_OWN) {
			ctx->rl_maxsegs = 0;
			return;
		}
		cmdstat = segs[i].ds_len;
		d->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
		d->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
		if (i == 0)
			cmdstat |= RL_TDESC_CMD_SOF;
		else
			cmdstat |= RL_TDESC_CMD_OWN;
		if (idx == (RL_RX_DESC_CNT - 1))
			cmdstat |= RL_TDESC_CMD_EOR;
		d->rl_cmdstat = htole32(cmdstat | ctx->rl_flags);
		i++;
		if (i == nseg)
			break;
		RL_DESC_INC(idx);
	}

	/* Mark the last descriptor and report segment count/position. */
	d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
	ctx->rl_maxsegs = nseg;
	ctx->rl_idx = idx;

	return;
}
960
961/*
962 * Map a single buffer address.
963 */
964
965static void
966re_dma_map_addr(arg, segs, nseg, error)
967 void *arg;
968 bus_dma_segment_t *segs;
969 int nseg;
970 int error;
971{
972 u_int32_t *addr;
973
974 if (error)
975 return;
976
977 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
978 addr = arg;
979 *addr = segs->ds_addr;
980
981 return;
982}
983
984static int
985re_allocmem(dev, sc)
986 device_t dev;
987 struct rl_softc *sc;
988{
989 int error;
990 int nseg;
991 int i;
992
993 /*
994 * Allocate map for RX mbufs.
995 */
996 nseg = 32;
997 error = bus_dma_tag_create(sc->rl_parent_tag, ETHER_ALIGN, 0,
998 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
999 NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
1000 NULL, NULL, &sc->rl_ldata.rl_mtag);
1001 if (error) {
1002 device_printf(dev, "could not allocate dma tag\n");
1003 return (ENOMEM);
1004 }
1005
1006 /*
1007 * Allocate map for TX descriptor list.
1008 */
1009 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1010 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1011 NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
1012 NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
1013 if (error) {
1014 device_printf(dev, "could not allocate dma tag\n");
1015 return (ENOMEM);
1016 }
1017
1018 /* Allocate DMA'able memory for the TX ring */
1019
1020 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
1021 (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1022 &sc->rl_ldata.rl_tx_list_map);
1023 if (error)
1024 return (ENOMEM);
1025
1026 /* Load the map for the TX ring. */
1027
1028 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
1029 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
1030 RL_TX_LIST_SZ, re_dma_map_addr,
1031 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
1032
1033 /* Create DMA maps for TX buffers */
1034
1035 for (i = 0; i < RL_TX_DESC_CNT; i++) {
1036 error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0,
1037 &sc->rl_ldata.rl_tx_dmamap[i]);
1038 if (error) {
1039 device_printf(dev, "can't create DMA map for TX\n");
1040 return(ENOMEM);
1041 }
1042 }
1043
1044 /*
1045 * Allocate map for RX descriptor list.
1046 */
1047 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1048 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1049 NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
1050 NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
1051 if (error) {
1052 device_printf(dev, "could not allocate dma tag\n");
1053 return (ENOMEM);
1054 }
1055
1056 /* Allocate DMA'able memory for the RX ring */
1057
1058 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
1059 (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1060 &sc->rl_ldata.rl_rx_list_map);
1061 if (error)
1062 return (ENOMEM);
1063
1064 /* Load the map for the RX ring. */
1065
1066 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
1067 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
1068 RL_TX_LIST_SZ, re_dma_map_addr,
1069 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
1070
1071 /* Create DMA maps for RX buffers */
1072
1073 for (i = 0; i < RL_RX_DESC_CNT; i++) {
1074 error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0,
1075 &sc->rl_ldata.rl_rx_dmamap[i]);
1076 if (error) {
1077 device_printf(dev, "can't create DMA map for RX\n");
1078 return(ENOMEM);
1079 }
1080 }
1081
1082 return(0);
1083}
1084
1085/*
1086 * Attach the interface. Allocate softc structures, do ifmedia
1087 * setup and ethernet/BPF attach.
1088 */
1089static int
1090re_attach(dev)
1091 device_t dev;
1092{
1093 u_char eaddr[ETHER_ADDR_LEN];
1094 u_int16_t as[3];
1095 struct rl_softc *sc;
1096 struct ifnet *ifp;
1097 struct rl_hwrev *hw_rev;
1098 int hwrev;
1099 u_int16_t re_did = 0;
1100 int unit, error = 0, rid, i;
1101
1102 sc = device_get_softc(dev);
1103 unit = device_get_unit(dev);
1104
1105 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1106 MTX_DEF | MTX_RECURSE);
1107#ifndef BURN_BRIDGES
1108 /*
1109 * Handle power management nonsense.
1110 */
1111
1112 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1113 u_int32_t iobase, membase, irq;
1114
1115 /* Save important PCI config data. */
1116 iobase = pci_read_config(dev, RL_PCI_LOIO, 4);
1117 membase = pci_read_config(dev, RL_PCI_LOMEM, 4);
1118 irq = pci_read_config(dev, RL_PCI_INTLINE, 4);
1119
1120 /* Reset the power state. */
1121 printf("re%d: chip is is in D%d power mode "
1122 "-- setting to D0\n", unit,
1123 pci_get_powerstate(dev));
1124
1125 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1126
1127 /* Restore PCI config data. */
1128 pci_write_config(dev, RL_PCI_LOIO, iobase, 4);
1129 pci_write_config(dev, RL_PCI_LOMEM, membase, 4);
1130 pci_write_config(dev, RL_PCI_INTLINE, irq, 4);
1131 }
1132#endif
1133 /*
1134 * Map control/status registers.
1135 */
1136 pci_enable_busmaster(dev);
1137
1138 rid = RL_RID;
1139 sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid,
1140 0, ~0, 1, RF_ACTIVE);
1141
1142 if (sc->rl_res == NULL) {
1143 printf ("re%d: couldn't map ports/memory\n", unit);
1144 error = ENXIO;
1145 goto fail;
1146 }
1147
1148 sc->rl_btag = rman_get_bustag(sc->rl_res);
1149 sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
1150
1151 /* Allocate interrupt */
1152 rid = 0;
1153 sc->rl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1154 RF_SHAREABLE | RF_ACTIVE);
1155
1156 if (sc->rl_irq == NULL) {
1157 printf("re%d: couldn't map interrupt\n", unit);
1158 error = ENXIO;
1159 goto fail;
1160 }
1161
1162 /* Reset the adapter. */
1163 re_reset(sc);
1164
1165 hw_rev = re_hwrevs;
1166 hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
1167 while (hw_rev->rl_desc != NULL) {
1168 if (hw_rev->rl_rev == hwrev) {
1169 sc->rl_type = hw_rev->rl_type;
1170 break;
1171 }
1172 hw_rev++;
1173 }
1174
1175 if (sc->rl_type == RL_8169) {
1176
1177 /* Set RX length mask */
1178
1179 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
1180
1181 /* Force station address autoload from the EEPROM */
1182
1183 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_AUTOLOAD);
1184 for (i = 0; i < RL_TIMEOUT; i++) {
1185 if (!(CSR_READ_1(sc, RL_EECMD) & RL_EEMODE_AUTOLOAD))
1186 break;
1187 DELAY(100);
1188 }
1189 if (i == RL_TIMEOUT)
1190 printf ("re%d: eeprom autoload timed out\n", unit);
1191
1192 for (i = 0; i < ETHER_ADDR_LEN; i++)
1193 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
1194 } else {
1195
1196 /* Set RX length mask */
1197
1198 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
1199
1200 sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
1201 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1, 0);
1202 if (re_did != 0x8129)
1203 sc->rl_eecmd_read = RL_EECMD_READ_8BIT;
1204
1205 /*
1206 * Get station address from the EEPROM.
1207 */
1208 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3, 0);
1209 for (i = 0; i < 3; i++) {
1210 eaddr[(i * 2) + 0] = as[i] & 0xff;
1211 eaddr[(i * 2) + 1] = as[i] >> 8;
1212 }
1213 }
1214
1215 /*
1216 * A RealTek chip was detected. Inform the world.
1217 */
1218 printf("re%d: Ethernet address: %6D\n", unit, eaddr, ":");
1219
1220 sc->rl_unit = unit;
1221 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1222
1223 /*
1224 * Allocate the parent bus DMA tag appropriate for PCI.
1225 */
1226#define RL_NSEG_NEW 32
1227 error = bus_dma_tag_create(NULL, /* parent */
1228 1, 0, /* alignment, boundary */
1229 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1230 BUS_SPACE_MAXADDR, /* highaddr */
1231 NULL, NULL, /* filter, filterarg */
1232 MAXBSIZE, RL_NSEG_NEW, /* maxsize, nsegments */
1233 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1234 BUS_DMA_ALLOCNOW, /* flags */
1235 NULL, NULL, /* lockfunc, lockarg */
1236 &sc->rl_parent_tag);
1237 if (error)
1238 goto fail;
1239
1240 error = re_allocmem(dev, sc);
1241
1242 if (error)
1243 goto fail;
1244
1245 /* Do MII setup */
1246 if (mii_phy_probe(dev, &sc->rl_miibus,
1247 re_ifmedia_upd, re_ifmedia_sts)) {
1248 printf("re%d: MII without any phy!\n", sc->rl_unit);
1249 error = ENXIO;
1250 goto fail;
1251 }
1252
1253 ifp = &sc->arpcom.ac_if;
1254 ifp->if_softc = sc;
1255 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1256 ifp->if_mtu = ETHERMTU;
1257 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1258 ifp->if_ioctl = re_ioctl;
1259 ifp->if_output = ether_output;
1260 ifp->if_capabilities = IFCAP_VLAN_MTU;
1261 ifp->if_start = re_start;
1262 ifp->if_hwassist = RE_CSUM_FEATURES;
1263 ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
1264 ifp->if_watchdog = re_watchdog;
1265 ifp->if_init = re_init;
1266 if (sc->rl_type == RL_8169)
1267 ifp->if_baudrate = 1000000000;
1268 else
1269 ifp->if_baudrate = 100000000;
1270 ifp->if_snd.ifq_maxlen = RL_IFQ_MAXLEN;
1271 ifp->if_capenable = ifp->if_capabilities;
1272
1273 callout_handle_init(&sc->rl_stat_ch);
1274
1275 /*
1276 * Call MI attach routine.
1277 */
1278 ether_ifattach(ifp, eaddr);
1279
1280 /* Perform hardware diagnostic. */
1281 error = re_diag(sc);
1282
1283 if (error) {
1284 printf("re%d: attach aborted due to hardware diag failure\n",
1285 unit);
1286 ether_ifdetach(ifp);
1287 goto fail;
1288 }
1289
1290 /* Hook interrupt last to avoid having to lock softc */
1291 error = bus_setup_intr(dev, sc->rl_irq, INTR_TYPE_NET,
1292 re_intr, sc, &sc->rl_intrhand);
1293
1294 if (error) {
1295 printf("re%d: couldn't set up irq\n", unit);
1296 ether_ifdetach(ifp);
1297 goto fail;
1298 }
1299
1300fail:
1301 if (error)
1302 re_detach(dev);
1303
1304 return (error);
1305}
1306
1307/*
1308 * Shutdown hardware and free up resources. This can be called any
1309 * time after the mutex has been initialized. It is called in both
1310 * the error case in attach and the normal detach case so it needs
1311 * to be careful about only freeing resources that have actually been
1312 * allocated.
1313 */
1314static int
1315re_detach(dev)
1316 device_t dev;
1317{
1318 struct rl_softc *sc;
1319 struct ifnet *ifp;
1320 int i;
1321
1322 sc = device_get_softc(dev);
1323 KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));
1324 RL_LOCK(sc);
1325 ifp = &sc->arpcom.ac_if;
1326
1327 /* These should only be active if attach succeeded */
1328 if (device_is_attached(dev)) {
1329 re_stop(sc);
1330 /*
1331 * Force off the IFF_UP flag here, in case someone
1332 * still had a BPF descriptor attached to this
1333 * interface. If they do, ether_ifattach() will cause
1334 * the BPF code to try and clear the promisc mode
1335 * flag, which will bubble down to re_ioctl(),
1336 * which will try to call re_init() again. This will
1337 * turn the NIC back on and restart the MII ticker,
1338 * which will panic the system when the kernel tries
1339 * to invoke the re_tick() function that isn't there
1340 * anymore.
1341 */
1342 ifp->if_flags &= ~IFF_UP;
1343 ether_ifdetach(ifp);
1344 }
1345 if (sc->rl_miibus)
1346 device_delete_child(dev, sc->rl_miibus);
1347 bus_generic_detach(dev);
1348
1349 if (sc->rl_intrhand)
1350 bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand);
1351 if (sc->rl_irq)
1352 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq);
1353 if (sc->rl_res)
1354 bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res);
1355
1356
1357 /* Unload and free the RX DMA ring memory and map */
1358
1359 if (sc->rl_ldata.rl_rx_list_tag) {
1360 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
1361 sc->rl_ldata.rl_rx_list_map);
1362 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
1363 sc->rl_ldata.rl_rx_list,
1364 sc->rl_ldata.rl_rx_list_map);
1365 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
1366 }
1367
1368 /* Unload and free the TX DMA ring memory and map */
1369
1370 if (sc->rl_ldata.rl_tx_list_tag) {
1371 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
1372 sc->rl_ldata.rl_tx_list_map);
1373 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
1374 sc->rl_ldata.rl_tx_list,
1375 sc->rl_ldata.rl_tx_list_map);
1376 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
1377 }
1378
1379 /* Destroy all the RX and TX buffer maps */
1380
1381 if (sc->rl_ldata.rl_mtag) {
1382 for (i = 0; i < RL_TX_DESC_CNT; i++)
1383 bus_dmamap_destroy(sc->rl_ldata.rl_mtag,
1384 sc->rl_ldata.rl_tx_dmamap[i]);
1385 for (i = 0; i < RL_RX_DESC_CNT; i++)
1386 bus_dmamap_destroy(sc->rl_ldata.rl_mtag,
1387 sc->rl_ldata.rl_rx_dmamap[i]);
1388 bus_dma_tag_destroy(sc->rl_ldata.rl_mtag);
1389 }
1390
1391 /* Unload and free the stats buffer and map */
1392
1393 if (sc->rl_ldata.rl_stag) {
1394 bus_dmamap_unload(sc->rl_ldata.rl_stag,
1395 sc->rl_ldata.rl_rx_list_map);
1396 bus_dmamem_free(sc->rl_ldata.rl_stag,
1397 sc->rl_ldata.rl_stats,
1398 sc->rl_ldata.rl_smap);
1399 bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
1400 }
1401
1402 if (sc->rl_parent_tag)
1403 bus_dma_tag_destroy(sc->rl_parent_tag);
1404
1405 RL_UNLOCK(sc);
1406 mtx_destroy(&sc->rl_mtx);
1407
1408 return(0);
1409}
1410
1411static int
1412re_newbuf(sc, idx, m)
1413 struct rl_softc *sc;
1414 int idx;
1415 struct mbuf *m;
1416{
1417 struct rl_dmaload_arg arg;
1418 struct mbuf *n = NULL;
1419 int error;
1420
1421 if (m == NULL) {
1422 n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1423 if (n == NULL)
1424 return(ENOBUFS);
1425 m = n;
1426 } else
1427 m->m_data = m->m_ext.ext_buf;
1428
1429 /*
1430 * Initialize mbuf length fields and fixup
1431 * alignment so that the frame payload is
1432 * longword aligned.
1433 */
1434 m->m_len = m->m_pkthdr.len = MCLBYTES;
1435 m_adj(m, ETHER_ALIGN);
1436
1437 arg.sc = sc;
1438 arg.rl_idx = idx;
1439 arg.rl_maxsegs = 1;
1440 arg.rl_flags = 0;
1441 arg.rl_ring = sc->rl_ldata.rl_rx_list;
1442
1443 error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag,
1444 sc->rl_ldata.rl_rx_dmamap[idx], m, re_dma_map_desc,
1445 &arg, BUS_DMA_NOWAIT);
1446 if (error || arg.rl_maxsegs != 1) {
1447 if (n != NULL)
1448 m_freem(n);
1449 return (ENOMEM);
1450 }
1451
1452 sc->rl_ldata.rl_rx_list[idx].rl_cmdstat |= htole32(RL_RDESC_CMD_OWN);
1453 sc->rl_ldata.rl_rx_mbuf[idx] = m;
1454
1455 bus_dmamap_sync(sc->rl_ldata.rl_mtag,
1456 sc->rl_ldata.rl_rx_dmamap[idx],
1457 BUS_DMASYNC_PREREAD);
1458
1459 return(0);
1460}
1461
1462static int
1463re_tx_list_init(sc)
1464 struct rl_softc *sc;
1465{
1466 bzero ((char *)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ);
1467 bzero ((char *)&sc->rl_ldata.rl_tx_mbuf,
1468 (RL_TX_DESC_CNT * sizeof(struct mbuf *)));
1469
1470 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
1471 sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE);
1472 sc->rl_ldata.rl_tx_prodidx = 0;
1473 sc->rl_ldata.rl_tx_considx = 0;
1474 sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT;
1475
1476 return(0);
1477}
1478
1479static int
1480re_rx_list_init(sc)
1481 struct rl_softc *sc;
1482{
1483 int i;
1484
1485 bzero ((char *)sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ);
1486 bzero ((char *)&sc->rl_ldata.rl_rx_mbuf,
1487 (RL_RX_DESC_CNT * sizeof(struct mbuf *)));
1488
1489 for (i = 0; i < RL_RX_DESC_CNT; i++) {
1490 if (re_newbuf(sc, i, NULL) == ENOBUFS)
1491 return(ENOBUFS);
1492 }
1493
1494 /* Flush the RX descriptors */
1495
1496 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
1497 sc->rl_ldata.rl_rx_list_map,
1498 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1499
1500 sc->rl_ldata.rl_rx_prodidx = 0;
1501 sc->rl_head = sc->rl_tail = NULL;
1502
1503 return(0);
1504}
1505
1506/*
1507 * RX handler for C+ and 8169. For the gigE chips, we support
1508 * the reception of jumbo frames that have been fragmented
1509 * across multiple 2K mbuf cluster buffers.
1510 */
1511static void
1512re_rxeof(sc)
1513 struct rl_softc *sc;
1514{
1515 struct mbuf *m;
1516 struct ifnet *ifp;
1517 int i, total_len;
1518 struct rl_desc *cur_rx;
1519 u_int32_t rxstat, rxvlan;
1520
1521 RL_LOCK_ASSERT(sc);
1522
1523 ifp = &sc->arpcom.ac_if;
1524 i = sc->rl_ldata.rl_rx_prodidx;
1525
1526 /* Invalidate the descriptor memory */
1527
1528 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
1529 sc->rl_ldata.rl_rx_list_map,
1530 BUS_DMASYNC_POSTREAD);
1531
1532 while (!RL_OWN(&sc->rl_ldata.rl_rx_list[i])) {
1533
1534 cur_rx = &sc->rl_ldata.rl_rx_list[i];
1535 m = sc->rl_ldata.rl_rx_mbuf[i];
1536 total_len = RL_RXBYTES(cur_rx);
1537 rxstat = le32toh(cur_rx->rl_cmdstat);
1538 rxvlan = le32toh(cur_rx->rl_vlanctl);
1539
1540 /* Invalidate the RX mbuf and unload its map */
1541
1542 bus_dmamap_sync(sc->rl_ldata.rl_mtag,
1543 sc->rl_ldata.rl_rx_dmamap[i],
1544 BUS_DMASYNC_POSTWRITE);
1545 bus_dmamap_unload(sc->rl_ldata.rl_mtag,
1546 sc->rl_ldata.rl_rx_dmamap[i]);
1547
1548 if (!(rxstat & RL_RDESC_STAT_EOF)) {
1549 m->m_len = MCLBYTES - ETHER_ALIGN;
1550 if (sc->rl_head == NULL)
1551 sc->rl_head = sc->rl_tail = m;
1552 else {
1553 m->m_flags &= ~M_PKTHDR;
1554 sc->rl_tail->m_next = m;
1555 sc->rl_tail = m;
1556 }
1557 re_newbuf(sc, i, NULL);
1558 RL_DESC_INC(i);
1559 continue;
1560 }
1561
1562 /*
1563 * NOTE: for the 8139C+, the frame length field
1564 * is always 12 bits in size, but for the gigE chips,
1565 * it is 13 bits (since the max RX frame length is 16K).
1566 * Unfortunately, all 32 bits in the status word
1567 * were already used, so to make room for the extra
1568 * length bit, RealTek took out the 'frame alignment
1569 * error' bit and shifted the other status bits
1570 * over one slot. The OWN, EOR, FS and LS bits are
1571 * still in the same places. We have already extracted
1572 * the frame length and checked the OWN bit, so rather
1573 * than using an alternate bit mapping, we shift the
1574 * status bits one space to the right so we can evaluate
1575 * them using the 8169 status as though it was in the
1576 * same format as that of the 8139C+.
1577 */
1578 if (sc->rl_type == RL_8169)
1579 rxstat >>= 1;
1580
1581 if (rxstat & RL_RDESC_STAT_RXERRSUM) {
1582 ifp->if_ierrors++;
1583 /*
1584 * If this is part of a multi-fragment packet,
1585 * discard all the pieces.
1586 */
1587 if (sc->rl_head != NULL) {
1588 m_freem(sc->rl_head);
1589 sc->rl_head = sc->rl_tail = NULL;
1590 }
1591 re_newbuf(sc, i, m);
1592 RL_DESC_INC(i);
1593 continue;
1594 }
1595
1596 /*
1597 * If allocating a replacement mbuf fails,
1598 * reload the current one.
1599 */
1600
1601 if (re_newbuf(sc, i, NULL)) {
1602 ifp->if_ierrors++;
1603 if (sc->rl_head != NULL) {
1604 m_freem(sc->rl_head);
1605 sc->rl_head = sc->rl_tail = NULL;
1606 }
1607 re_newbuf(sc, i, m);
1608 RL_DESC_INC(i);
1609 continue;
1610 }
1611
1612 RL_DESC_INC(i);
1613
1614 if (sc->rl_head != NULL) {
1615 m->m_len = total_len % (MCLBYTES - ETHER_ALIGN);
1616 /*
1617 * Special case: if there's 4 bytes or less
1618 * in this buffer, the mbuf can be discarded:
1619 * the last 4 bytes is the CRC, which we don't
1620 * care about anyway.
1621 */
1622 if (m->m_len <= ETHER_CRC_LEN) {
1623 sc->rl_tail->m_len -=
1624 (ETHER_CRC_LEN - m->m_len);
1625 m_freem(m);
1626 } else {
1627 m->m_len -= ETHER_CRC_LEN;
1628 m->m_flags &= ~M_PKTHDR;
1629 sc->rl_tail->m_next = m;
1630 }
1631 m = sc->rl_head;
1632 sc->rl_head = sc->rl_tail = NULL;
1633 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1634 } else
1635 m->m_pkthdr.len = m->m_len =
1636 (total_len - ETHER_CRC_LEN);
1637
1638 ifp->if_ipackets++;
1639 m->m_pkthdr.rcvif = ifp;
1640
1641 /* Do RX checksumming if enabled */
1642
1643 if (ifp->if_capenable & IFCAP_RXCSUM) {
1644
1645 /* Check IP header checksum */
1646 if (rxstat & RL_RDESC_STAT_PROTOID)
1647 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1648 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
1649 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1650
1651 /* Check TCP/UDP checksum */
1652 if ((RL_TCPPKT(rxstat) &&
1653 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
1654 (RL_UDPPKT(rxstat) &&
1655 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
1656 m->m_pkthdr.csum_flags |=
1657 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1658 m->m_pkthdr.csum_data = 0xffff;
1659 }
1660 }
1661
1662 if (rxvlan & RL_RDESC_VLANCTL_TAG)
1663 VLAN_INPUT_TAG(ifp, m,
1664 ntohs((rxvlan & RL_RDESC_VLANCTL_DATA)), continue);
1665 RL_UNLOCK(sc);
1666 (*ifp->if_input)(ifp, m);
1667 RL_LOCK(sc);
1668 }
1669
1670 /* Flush the RX DMA ring */
1671
1672 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
1673 sc->rl_ldata.rl_rx_list_map,
1674 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1675
1676 sc->rl_ldata.rl_rx_prodidx = i;
1677
1678 return;
1679}
1680
/*
 * TX completion handler: walk the descriptor ring from the consumer
 * index towards the producer index, reclaiming mbufs and DMA maps
 * for descriptors the chip has released (OWN bit cleared), and
 * update the interface statistics from the per-frame status bits.
 */
static void
re_txeof(sc)
	struct rl_softc *sc;
{
	struct ifnet *ifp;
	u_int32_t txstat;
	int idx;

	ifp = &sc->arpcom.ac_if;
	idx = sc->rl_ldata.rl_tx_considx;

	/* Invalidate the TX descriptor list */

	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (idx != sc->rl_ldata.rl_tx_prodidx) {

		txstat = le32toh(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat);
		/* Chip still owns this descriptor; stop here. */
		if (txstat & RL_TDESC_CMD_OWN)
			break;

		/*
		 * We only stash mbufs in the last descriptor
		 * in a fragment chain, which also happens to
		 * be the only place where the TX status bits
		 * are valid.
		 */

		if (txstat & RL_TDESC_CMD_EOF) {
			m_freem(sc->rl_ldata.rl_tx_mbuf[idx]);
			sc->rl_ldata.rl_tx_mbuf[idx] = NULL;
			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
			    sc->rl_ldata.rl_tx_dmamap[idx]);
			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
			    RL_TDESC_STAT_COLCNT))
				ifp->if_collisions++;
			if (txstat & RL_TDESC_STAT_TXERRSUM)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
		}
		sc->rl_ldata.rl_tx_free++;
		RL_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */

	if (idx != sc->rl_ldata.rl_tx_considx) {
		sc->rl_ldata.rl_tx_considx = idx;
		/* Descriptors were freed: the queue can move again. */
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->rl_ldata.rl_tx_free != RL_TX_DESC_CNT)
		CSR_WRITE_4(sc, RL_TIMERCNT, 1);

	return;
}
1747
1748static void
1749re_tick(xsc)
1750 void *xsc;
1751{
1752 struct rl_softc *sc;
1753 struct mii_data *mii;
1754
1755 sc = xsc;
1756 RL_LOCK(sc);
1757 mii = device_get_softc(sc->rl_miibus);
1758
1759 mii_tick(mii);
1760
1761 sc->rl_stat_ch = timeout(re_tick, sc, hz);
1762 RL_UNLOCK(sc);
1763
1764 return;
1765}
1766
1767#ifdef DEVICE_POLLING
1768static void
1769re_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
1770{
1771 struct rl_softc *sc = ifp->if_softc;
1772
1773 RL_LOCK(sc);
1774 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
1775 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
1776 goto done;
1777 }
1778
1779 sc->rxcycles = count;
1780 re_rxeof(sc);
1781 re_txeof(sc);
1782
1783 if (ifp->if_snd.ifq_head != NULL)
1784 (*ifp->if_start)(ifp);
1785
1786 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1787 u_int16_t status;
1788
1789 status = CSR_READ_2(sc, RL_ISR);
1790 if (status == 0xffff)
1791 goto done;
1792 if (status)
1793 CSR_WRITE_2(sc, RL_ISR, status);
1794
1795 /*
1796 * XXX check behaviour on receiver stalls.
1797 */
1798
1799 if (status & RL_ISR_SYSTEM_ERR) {
1800 re_reset(sc);
1801 re_init(sc);
1802 }
1803 }
1804done:
1805 RL_UNLOCK(sc);
1806}
1807#endif /* DEVICE_POLLING */
1808
/*
 * Interrupt handler.  Reads and acknowledges the interrupt status
 * register in a loop, dispatching to the RX/TX completion handlers
 * until no interesting status bits remain set.  When DEVICE_POLLING
 * is active the real work is deferred to re_poll().
 */
static void
re_intr(arg)
	void *arg;
{
	struct rl_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;

	sc = arg;

	/* Ignore stray interrupts while suspended. */
	if (sc->suspended) {
		return;
	}

	RL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	if (!(ifp->if_flags & IFF_UP)) {
		RL_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if (ether_poll_register(re_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_2(sc, RL_IMR, 0x0000);
		re_poll(ifp, 0, 1);
		goto done;
	}
#endif /* DEVICE_POLLING */

	for (;;) {

		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xffff)
			break;
		/* Writing the status bits back acknowledges them. */
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);

		if ((status & RL_INTRS_CPLUS) == 0)
			break;

		if (status & RL_ISR_RX_OK)
			re_rxeof(sc);

		if (status & RL_ISR_RX_ERR)
			re_rxeof(sc);

		if ((status & RL_ISR_TIMEOUT_EXPIRED) ||
		    (status & RL_ISR_TX_ERR) ||
		    (status & RL_ISR_TX_DESC_UNAVAIL))
			re_txeof(sc);

		/* Fatal error: reset and reinitialize the chip. */
		if (status & RL_ISR_SYSTEM_ERR) {
			re_reset(sc);
			re_init(sc);
		}

		/* Link change: run the MII tick immediately. */
		if (status & RL_ISR_LINKCHG) {
			untimeout(re_tick, sc, sc->rl_stat_ch);
			re_tick(sc);
		}
	}

	/* Kick the transmitter if frames are waiting. */
	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

#ifdef DEVICE_POLLING
done:
#endif
	RL_UNLOCK(sc);

	return;
}
1885
/*
 * Encapsulate one outgoing frame into the TX descriptor ring starting
 * at *idx.  On success, *idx is advanced past the descriptor chain and
 * 0 is returned; on failure a non-zero value is returned and the mbuf
 * is left with the caller.  The descriptors themselves are filled in
 * by the re_dma_map_desc() bus_dma callback via 'arg'.
 */
static int
re_encap(sc, m_head, idx)
	struct rl_softc *sc;
	struct mbuf *m_head;
	int *idx;
{
	struct mbuf *m_new = NULL;
	struct rl_dmaload_arg arg;
	bus_dmamap_t map;
	int error;
	struct m_tag *mtag;

	/* Keep a few descriptors in reserve; refuse when nearly full. */
	if (sc->rl_ldata.rl_tx_free <= 4)
		return(EFBIG);

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. (This is according to testing done with an 8169
	 * chip. I'm not sure if this is a requirement or a bug.)
	 */

	arg.rl_flags = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		arg.rl_flags |= RL_TDESC_CMD_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
		arg.rl_flags |= RL_TDESC_CMD_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
		arg.rl_flags |= RL_TDESC_CMD_UDPCSUM;

	arg.sc = sc;
	arg.rl_idx = *idx;
	arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
	if (arg.rl_maxsegs > 4)
		arg.rl_maxsegs -= 4;
	arg.rl_ring = sc->rl_ldata.rl_tx_list;

	map = sc->rl_ldata.rl_tx_dmamap[*idx];
	error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
	    m_head, re_dma_map_desc, &arg, BUS_DMA_NOWAIT);

	if (error && error != EFBIG) {
		printf("re%d: can't map mbuf (error %d)\n", sc->rl_unit, error);
		return(ENOBUFS);
	}

	/* Too many segments to map, coalesce into a single mbuf */

	if (error || arg.rl_maxsegs == 0) {
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			/* NOTE(review): returns 1, unlike the errno-style
			 * returns elsewhere in this function. */
			return(1);
		else
			m_head = m_new;

		/* Retry the load with the defragmented chain. */
		arg.sc = sc;
		arg.rl_idx = *idx;
		arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
		arg.rl_ring = sc->rl_ldata.rl_tx_list;

		error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
		    m_head, re_dma_map_desc, &arg, BUS_DMA_NOWAIT);
		if (error) {
			printf("re%d: can't map mbuf (error %d)\n",
			    sc->rl_unit, error);
			return(EFBIG);
		}
	}

	/*
	 * Insure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain.  (Swap so re_txeof() unloads the right
	 * map when the EOF descriptor completes.)
	 */
	sc->rl_ldata.rl_tx_dmamap[*idx] =
	    sc->rl_ldata.rl_tx_dmamap[arg.rl_idx];
	sc->rl_ldata.rl_tx_dmamap[arg.rl_idx] = map;

	sc->rl_ldata.rl_tx_mbuf[arg.rl_idx] = m_head;
	sc->rl_ldata.rl_tx_free -= arg.rl_maxsegs;

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in the first descriptor of a multi-descriptor
	 * transmission attempt.
	 */

	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
	if (mtag != NULL)
		sc->rl_ldata.rl_tx_list[*idx].rl_vlanctl =
		    htole32(htons(VLAN_TAG_VALUE(mtag)) | RL_TDESC_VLANCTL_TAG);

	/*
	 * Transfer ownership of packet to the chip.  The OWN bit is
	 * set on the last descriptor of the chain before the first,
	 * so the chip never sees a partially-owned chain.
	 */

	sc->rl_ldata.rl_tx_list[arg.rl_idx].rl_cmdstat |=
	    htole32(RL_TDESC_CMD_OWN);
	if (*idx != arg.rl_idx)
		sc->rl_ldata.rl_tx_list[*idx].rl_cmdstat |=
		    htole32(RL_TDESC_CMD_OWN);

	RL_DESC_INC(arg.rl_idx);
	*idx = arg.rl_idx;

	return(0);
}
1992
1993/*
1994 * Main transmit routine for C+ and gigE NICs.
1995 */
1996
1997static void
1998re_start(ifp)
1999 struct ifnet *ifp;
2000{
2001 struct rl_softc *sc;
2002 struct mbuf *m_head = NULL;
2003 int idx;
2004
2005 sc = ifp->if_softc;
2006 RL_LOCK(sc);
2007
2008 idx = sc->rl_ldata.rl_tx_prodidx;
2009
2010 while (sc->rl_ldata.rl_tx_mbuf[idx] == NULL) {
2011 IF_DEQUEUE(&ifp->if_snd, m_head);
2012 if (m_head == NULL)
2013 break;
2014
2015 if (re_encap(sc, m_head, &idx)) {
2016 IF_PREPEND(&ifp->if_snd, m_head);
2017 ifp->if_flags |= IFF_OACTIVE;
2018 break;
2019 }
2020
2021 /*
2022 * If there's a BPF listener, bounce a copy of this frame
2023 * to him.
2024 */
2025 BPF_MTAP(ifp, m_head);
2026 }
2027
2028 /* Flush the TX descriptors */
2029
2030 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2031 sc->rl_ldata.rl_tx_list_map,
2032 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2033
2034 sc->rl_ldata.rl_tx_prodidx = idx;
2035
2036 /*
2037 * RealTek put the TX poll request register in a different
2038 * location on the 8169 gigE chip. I don't know why.
2039 */
2040
2041 if (sc->rl_type == RL_8169)
2042 CSR_WRITE_2(sc, RL_GTXSTART, RL_TXSTART_START);
2043 else
2044 CSR_WRITE_2(sc, RL_TXSTART, RL_TXSTART_START);
2045
2046 /*
2047 * Use the countdown timer for interrupt moderation.
2048 * 'TX done' interrupts are disabled. Instead, we reset the
2049 * countdown timer, which will begin counting until it hits
2050 * the value in the TIMERINT register, and then trigger an
2051 * interrupt. Each time we write to the TIMERCNT register,
2052 * the timer count is reset to 0.
2053 */
2054 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2055
2056 RL_UNLOCK(sc);
2057
2058 /*
2059 * Set a timeout in case the chip goes out to lunch.
2060 */
2061 ifp->if_timer = 5;
2062
2063 return;
2064}
2065
/*
 * Stop and reinitialize the hardware: program C+ mode, the station
 * address, the RX/TX rings and filters, then enable the receiver and
 * transmitter.  Takes the (recursive) softc lock itself; it is also
 * invoked from re_intr() with the lock already held.  In test mode
 * (sc->rl_testmode) interrupts stay masked and the MII/callout setup
 * at the end is skipped.
 */
static void
re_init(xsc)
	void *xsc;
{
	struct rl_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	u_int32_t rxcfg = 0;

	RL_LOCK(sc);
	mii = device_get_softc(sc->rl_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	re_stop(sc);

	/*
	 * Enable C+ RX and TX mode, as well as VLAN stripping and
	 * RX checksum offload. We must configure the C+ register
	 * before all others.
	 */
	CSR_WRITE_2(sc, RL_CPLUS_CMD, RL_CPLUSCMD_RXENB|
	    RL_CPLUSCMD_TXENB|RL_CPLUSCMD_PCI_MRW|
	    RL_CPLUSCMD_VLANSTRIP|
	    (ifp->if_capenable & IFCAP_RXCSUM ?
	    RL_CPLUSCMD_RXCSUM_ENB : 0));

	/*
	 * Init our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_STREAM_4(sc, RL_IDR0,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_STREAM_4(sc, RL_IDR4,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/*
	 * For C+ mode, initialize the RX descriptors and mbufs.
	 */
	re_rx_list_init(sc);
	re_tx_list_init(sc);

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.  Test mode enables
	 * the chip's internal loopback for re_diag().
	 */
	if (sc->rl_testmode) {
		if (sc->rl_type == RL_8169)
			CSR_WRITE_4(sc, RL_TXCFG,
			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
		else
			CSR_WRITE_4(sc, RL_TXCFG,
			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
	} else
		CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);

	/* Set the individual bit to receive frames for this host only. */
	rxcfg = CSR_READ_4(sc, RL_RXCFG);
	rxcfg |= RL_RXCFG_RX_INDIV;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		rxcfg |= RL_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RL_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		rxcfg |= RL_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RL_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	re_setmulti(sc);

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else	/* otherwise ... */
#endif /* DEVICE_POLLING */
	/*
	 * Enable interrupts.
	 */
	if (sc->rl_testmode)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else
		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);

	/* Set initial TX threshold */
	sc->rl_txthresh = RL_TX_THRESH_INIT;

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
#ifdef notdef
	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
#endif
	/*
	 * Load the addresses of the RX and TX lists into the chip.
	 */

	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));

	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));

	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);

	/*
	 * Initialize the timer interrupt register so that
	 * a timer interrupt will be generated once the timer
	 * reaches a certain number of ticks. The timer is
	 * reloaded on each transmit. This gives us TX interrupt
	 * moderation, which dramatically improves TX frame rate.
	 */

	if (sc->rl_type == RL_8169)
		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
	else
		CSR_WRITE_4(sc, RL_TIMERINT, 0x400);

	/*
	 * For 8169 gigE NICs, set the max allowed RX packet
	 * size so we can receive jumbo frames.
	 */
	if (sc->rl_type == RL_8169)
		CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);

	/* In test mode, skip media selection and the periodic tick. */
	if (sc->rl_testmode) {
		RL_UNLOCK(sc);
		return;
	}

	mii_mediachg(mii);

	CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->rl_stat_ch = timeout(re_tick, sc, hz);
	RL_UNLOCK(sc);

	return;
}
2238
2239/*
2240 * Set media options.
2241 */
2242static int
2243re_ifmedia_upd(ifp)
2244 struct ifnet *ifp;
2245{
2246 struct rl_softc *sc;
2247 struct mii_data *mii;
2248
2249 sc = ifp->if_softc;
2250 mii = device_get_softc(sc->rl_miibus);
2251 mii_mediachg(mii);
2252
2253 return(0);
2254}
2255
2256/*
2257 * Report current media status.
2258 */
2259static void
2260re_ifmedia_sts(ifp, ifmr)
2261 struct ifnet *ifp;
2262 struct ifmediareq *ifmr;
2263{
2264 struct rl_softc *sc;
2265 struct mii_data *mii;
2266
2267 sc = ifp->if_softc;
2268 mii = device_get_softc(sc->rl_miibus);
2269
2270 mii_pollstat(mii);
2271 ifmr->ifm_active = mii->mii_media_active;
2272 ifmr->ifm_status = mii->mii_media_status;
2273
2274 return;
2275}
2276
2277static int
2278re_ioctl(ifp, command, data)
2279 struct ifnet *ifp;
2280 u_long command;
2281 caddr_t data;
2282{
2283 struct rl_softc *sc = ifp->if_softc;
2284 struct ifreq *ifr = (struct ifreq *) data;
2285 struct mii_data *mii;
2286 int error = 0;
2287
2288 RL_LOCK(sc);
2289
2290 switch(command) {
2291 case SIOCSIFMTU:
2292 if (ifr->ifr_mtu > RL_JUMBO_MTU)
2293 error = EINVAL;
2294 ifp->if_mtu = ifr->ifr_mtu;
2295 break;
2296 case SIOCSIFFLAGS:
2297 if (ifp->if_flags & IFF_UP) {
2298 re_init(sc);
2299 } else {
2300 if (ifp->if_flags & IFF_RUNNING)
2301 re_stop(sc);
2302 }
2303 error = 0;
2304 break;
2305 case SIOCADDMULTI:
2306 case SIOCDELMULTI:
2307 re_setmulti(sc);
2308 error = 0;
2309 break;
2310 case SIOCGIFMEDIA:
2311 case SIOCSIFMEDIA:
2312 mii = device_get_softc(sc->rl_miibus);
2313 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2314 break;
2315 case SIOCSIFCAP:
2316 ifp->if_capenable = ifr->ifr_reqcap;
2317 if (ifp->if_capenable & IFCAP_TXCSUM)
2318 ifp->if_hwassist = RE_CSUM_FEATURES;
2319 else
2320 ifp->if_hwassist = 0;
2321 if (ifp->if_flags & IFF_RUNNING)
2322 re_init(sc);
2323 break;
2324 default:
2325 error = ether_ioctl(ifp, command, data);
2326 break;
2327 }
2328
2329 RL_UNLOCK(sc);
2330
2331 return(error);
2332}
2333
2334static void
2335re_watchdog(ifp)
2336 struct ifnet *ifp;
2337{
2338 struct rl_softc *sc;
2339
2340 sc = ifp->if_softc;
2341 RL_LOCK(sc);
2342 printf("re%d: watchdog timeout\n", sc->rl_unit);
2343 ifp->if_oerrors++;
2344
2345 re_txeof(sc);
2346 re_rxeof(sc);
2347
2348 re_init(sc);
2349
2350 RL_UNLOCK(sc);
2351
2352 return;
2353}
2354
2355/*
2356 * Stop the adapter and free any mbufs allocated to the
2357 * RX and TX lists.
2358 */
2359static void
2360re_stop(sc)
2361 struct rl_softc *sc;
2362{
2363 register int i;
2364 struct ifnet *ifp;
2365
2366 RL_LOCK(sc);
2367 ifp = &sc->arpcom.ac_if;
2368 ifp->if_timer = 0;
2369
2370 untimeout(re_tick, sc, sc->rl_stat_ch);
2371 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2372#ifdef DEVICE_POLLING
2373 ether_poll_deregister(ifp);
2374#endif /* DEVICE_POLLING */
2375
2376 CSR_WRITE_1(sc, RL_COMMAND, 0x00);
2377 CSR_WRITE_2(sc, RL_IMR, 0x0000);
2378
2379 if (sc->rl_head != NULL) {
2380 m_freem(sc->rl_head);
2381 sc->rl_head = sc->rl_tail = NULL;
2382 }
2383
2384 /* Free the TX list buffers. */
2385
2386 for (i = 0; i < RL_TX_DESC_CNT; i++) {
2387 if (sc->rl_ldata.rl_tx_mbuf[i] != NULL) {
2388 bus_dmamap_unload(sc->rl_ldata.rl_mtag,
2389 sc->rl_ldata.rl_tx_dmamap[i]);
2390 m_freem(sc->rl_ldata.rl_tx_mbuf[i]);
2391 sc->rl_ldata.rl_tx_mbuf[i] = NULL;
2392 }
2393 }
2394
2395 /* Free the RX list buffers. */
2396
2397 for (i = 0; i < RL_RX_DESC_CNT; i++) {
2398 if (sc->rl_ldata.rl_rx_mbuf[i] != NULL) {
2399 bus_dmamap_unload(sc->rl_ldata.rl_mtag,
2400 sc->rl_ldata.rl_rx_dmamap[i]);
2401 m_freem(sc->rl_ldata.rl_rx_mbuf[i]);
2402 sc->rl_ldata.rl_rx_mbuf[i] = NULL;
2403 }
2404 }
2405
2406 RL_UNLOCK(sc);
2407 return;
2408}
2409
2410/*
2411 * Device suspend routine. Stop the interface and save some PCI
2412 * settings in case the BIOS doesn't restore them properly on
2413 * resume.
2414 */
2415static int
2416re_suspend(dev)
2417 device_t dev;
2418{
2419 register int i;
2420 struct rl_softc *sc;
2421
2422 sc = device_get_softc(dev);
2423
2424 re_stop(sc);
2425
2426 for (i = 0; i < 5; i++)
2427 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
2428 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
2429 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
2430 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
2431 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
2432
2433 sc->suspended = 1;
2434
2435 return (0);
2436}
2437
2438/*
2439 * Device resume routine. Restore some PCI settings in case the BIOS
2440 * doesn't, re-enable busmastering, and restart the interface if
2441 * appropriate.
2442 */
2443static int
2444re_resume(dev)
2445 device_t dev;
2446{
2447 register int i;
2448 struct rl_softc *sc;
2449 struct ifnet *ifp;
2450
2451 sc = device_get_softc(dev);
2452 ifp = &sc->arpcom.ac_if;
2453
2454 /* better way to do this? */
2455 for (i = 0; i < 5; i++)
2456 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
2457 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
2458 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
2459 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
2460 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
2461
2462 /* reenable busmastering */
2463 pci_enable_busmaster(dev);
2464 pci_enable_io(dev, RL_RES);
2465
2466 /* reinitialize interface if necessary */
2467 if (ifp->if_flags & IFF_UP)
2468 re_init(sc);
2469
2470 sc->suspended = 0;
2471
2472 return (0);
2473}
2474
2475/*
2476 * Stop all chip I/O so that the kernel's probe routines don't
2477 * get confused by errant DMAs when rebooting.
2478 */
2479static void
2480re_shutdown(dev)
2481 device_t dev;
2482{
2483 struct rl_softc *sc;
2484
2485 sc = device_get_softc(dev);
2486
2487 re_stop(sc);
2488
2489 return;
2490}