if_sf.c (175520) -> if_sf.c (175526)
1/*-
2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 17 unchanged lines hidden (view full) ---

26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/dev/sf/if_sf.c 175520 2008-01-21 04:23:47Z yongari $");
34__FBSDID("$FreeBSD: head/sys/dev/sf/if_sf.c 175526 2008-01-21 06:38:23Z yongari $");
35
36/*
37 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD.
38 * Programming manual is available from:
39 * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf.
40 *
41 * Written by Bill Paul <wpaul@ctr.columbia.edu>
42 * Department of Electrical Engineering
43 * Columbia University, New York City
44 */
45/*
46 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet
47 * controller designed with flexibility and reduced CPU load in mind.
48 * The Starfire offers high and low priority buffer queues, a
49 * producer/consumer index mechanism and several different buffer
50 * queue and completion queue descriptor types. Any one of a number
51 * of different driver designs can be used, depending on system and
52 * OS requirements. This driver makes use of type0 transmit frame
53 * descriptors (since BSD fragments packets across an mbuf chain)
52 * OS requirements. This driver makes use of type2 transmit frame
53 * descriptors to take full advantage of fragmented packet buffers
54 * and two RX buffer queues prioritized on size (one queue for small
55 * frames that will fit into a single mbuf, another with full size
56 * mbuf clusters for everything else). The producer/consumer indexes
57 * and completion queues are also used.
58 *
59 * One downside to the Starfire has to do with alignment: buffer
60 * queues must be aligned on 256-byte boundaries, and receive buffers
61 * must be aligned on longword boundaries. The receive buffer alignment
62 * causes problems on the Alpha platform, where the packet payload
63 * should be longword aligned. There is no simple way around this.
62 * causes problems on strict-alignment architectures, where the
63 * packet payload should be longword aligned. There is no simple
64 * way around this.
64 *
65 * For receive filtering, the Starfire offers 16 perfect filter slots
66 * and a 512-bit hash table.
67 *
68 * The Starfire has no internal transceiver, relying instead on an
69 * external MII-based transceiver. Accessing registers on external
70 * PHYs is done through a special register map rather than with the
71 * usual bitbang MDIO method.

--- 8 unchanged lines hidden (view full) ---

80 */
81
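/*
 * Editor's sketch (not part of this change): the usual fix for the
 * strict-alignment problem described above is to shift the received
 * frame forward by ETHER_ALIGN (2) bytes so the IP header becomes
 * longword aligned; the sf_fixup_rx() routine declared below under
 * !__NO_STRICT_ALIGNMENT is this driver's version of the technique.
 * A minimal form, assuming two bytes of headroom before m_data:
 */
#if 0
static __inline void
fixup_rx_sketch(struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	/* Copy the frame back over its own front, two bytes at a time. */
	src = mtod(m, uint16_t *);
	dst = src - 1;
	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;
	m->m_data -= ETHER_ALIGN;
}
#endif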
82#ifdef HAVE_KERNEL_OPTION_HEADERS
83#include "opt_device_polling.h"
84#endif
85
86#include <sys/param.h>
87#include <sys/systm.h>
88#include <sys/sockio.h>
89#include <sys/mbuf.h>
90#include <sys/malloc.h>
89#include <sys/bus.h>
90#include <sys/endian.h>
91#include <sys/kernel.h>
91#include <sys/kernel.h>
92#include <sys/malloc.h>
93#include <sys/mbuf.h>
94#include <sys/rman.h>
92#include <sys/module.h>
93#include <sys/socket.h>
95#include <sys/module.h>
96#include <sys/socket.h>
97#include <sys/sockio.h>
98#include <sys/sysctl.h>
99#include <sys/taskqueue.h>
94
100
101#include <net/bpf.h>
95#include <net/if.h>
96#include <net/if_arp.h>
97#include <net/ethernet.h>
98#include <net/if_dl.h>
99#include <net/if_media.h>
100#include <net/if_types.h>
102#include <net/if.h>
103#include <net/if_arp.h>
104#include <net/ethernet.h>
105#include <net/if_dl.h>
106#include <net/if_media.h>
107#include <net/if_types.h>
108#include <net/if_vlan_var.h>
101
109
102#include <net/bpf.h>
103
104#include <vm/vm.h> /* for vtophys */
105#include <vm/pmap.h> /* for vtophys */
106#include <machine/bus.h>
107#include <machine/resource.h>
108#include <sys/bus.h>
109#include <sys/rman.h>
110
111#include <dev/mii/mii.h>
112#include <dev/mii/miivar.h>
113
114/* "device miibus" required. See GENERIC if you get errors here. */
115#include "miibus_if.h"
116
117#include <dev/pci/pcireg.h>
118#include <dev/pci/pcivar.h>
119
120#define SF_USEIOSPACE
116#include <machine/bus.h>
121
122#include <dev/sf/if_sfreg.h>
117
118#include <dev/sf/if_sfreg.h>
119#include <dev/sf/starfire_rx.h>
120#include <dev/sf/starfire_tx.h>
123
121
122/* "device miibus" required. See GENERIC if you get errors here. */
123#include "miibus_if.h"
124
125MODULE_DEPEND(sf, pci, 1, 1, 1);
126MODULE_DEPEND(sf, ether, 1, 1, 1);
127MODULE_DEPEND(sf, miibus, 1, 1, 1);
128
129#undef SF_GFP_DEBUG
130#define SF_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
131/* Define this to activate partial TCP/UDP checksum offload. */
132#undef SF_PARTIAL_CSUM_SUPPORT
133
128static struct sf_type sf_devs[] = {
134static struct sf_type sf_devs[] = {
129 { AD_VENDORID, AD_DEVICEID_STARFIRE,
130 "Adaptec AIC-6915 10/100BaseTX" },
131 { 0, 0, NULL }
135 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
136 AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" },
137 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
138 AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" },
139 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
140 AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" },
141 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
142 AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" },
143 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
144 AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" },
145 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
146 AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" },
147 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
148 AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" },
132};
133
149};
150
151static int sf_probe(device_t);
152static int sf_attach(device_t);
153static int sf_detach(device_t);
154static int sf_shutdown(device_t);
155static int sf_suspend(device_t);
156static int sf_resume(device_t);
157static void sf_intr(void *);
138static void sf_stats_update(void *);
158static void sf_tick(void *);
159static void sf_stats_update(struct sf_softc *);
160#ifndef __NO_STRICT_ALIGNMENT
161static __inline void sf_fixup_rx(struct mbuf *);
162#endif
163static void sf_rxeof(struct sf_softc *);
164static void sf_txeof(struct sf_softc *);
141static int sf_encap(struct sf_softc *, struct sf_tx_bufdesc_type0 *,
142 struct mbuf *);
165static int sf_encap(struct sf_softc *, struct mbuf **);
166static void sf_start(struct ifnet *);
167static void sf_start_locked(struct ifnet *);
168static int sf_ioctl(struct ifnet *, u_long, caddr_t);
169static void sf_download_fw(struct sf_softc *);
170static void sf_init(void *);
171static void sf_init_locked(struct sf_softc *);
172static void sf_stop(struct sf_softc *);
149static void sf_watchdog(struct ifnet *);
150static int sf_shutdown(device_t);
173static void sf_watchdog(struct sf_softc *);
174static int sf_ifmedia_upd(struct ifnet *);
152static void sf_ifmedia_upd_locked(struct ifnet *);
153static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *);
154static void sf_reset(struct sf_softc *);
175static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *);
176static void sf_reset(struct sf_softc *);
177static int sf_dma_alloc(struct sf_softc *);
178static void sf_dma_free(struct sf_softc *);
179static int sf_init_rx_ring(struct sf_softc *);
180static void sf_init_tx_ring(struct sf_softc *);
157static int sf_newbuf(struct sf_softc *, struct sf_rx_bufdesc_type0 *,
158 struct mbuf *);
159static void sf_setmulti(struct sf_softc *);
160static int sf_setperf(struct sf_softc *, int, caddr_t);
181static int sf_newbuf(struct sf_softc *, int);
182static void sf_rxfilter(struct sf_softc *);
183static int sf_setperf(struct sf_softc *, int, uint8_t *);
184static int sf_sethash(struct sf_softc *, caddr_t, int);
185#ifdef notdef
163static int sf_setvlan(struct sf_softc *, int, u_int32_t);
186static int sf_setvlan(struct sf_softc *, int, uint32_t);
187#endif
188
166static u_int8_t sf_read_eeprom(struct sf_softc *, int);
189static uint8_t sf_read_eeprom(struct sf_softc *, int);
190
191static int sf_miibus_readreg(device_t, int, int);
192static int sf_miibus_writereg(device_t, int, int, int);
193static void sf_miibus_statchg(device_t);
194static void sf_link_task(void *, int);
195#ifdef DEVICE_POLLING
196static void sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
173static void sf_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
197#endif
198
176static u_int32_t csr_read_4(struct sf_softc *, int);
177static void csr_write_4(struct sf_softc *, int, u_int32_t);
199static uint32_t csr_read_4(struct sf_softc *, int);
200static void csr_write_4(struct sf_softc *, int, uint32_t);
201static void sf_txthresh_adjust(struct sf_softc *);
202static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS);
203static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
204static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS);
205
180#ifdef SF_USEIOSPACE
181#define SF_RES SYS_RES_IOPORT
182#define SF_RID SF_PCI_LOIO
183#else
184#define SF_RES SYS_RES_MEMORY
185#define SF_RID SF_PCI_LOMEM
186#endif
187
206static device_method_t sf_methods[] = {
207 /* Device interface */
208 DEVMETHOD(device_probe, sf_probe),
209 DEVMETHOD(device_attach, sf_attach),
210 DEVMETHOD(device_detach, sf_detach),
211 DEVMETHOD(device_shutdown, sf_shutdown),
212 DEVMETHOD(device_suspend, sf_suspend),
213 DEVMETHOD(device_resume, sf_resume),
214
215 /* bus interface */
216 DEVMETHOD(bus_print_child, bus_generic_print_child),
217 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
218
219 /* MII interface */
220 DEVMETHOD(miibus_readreg, sf_miibus_readreg),
221 DEVMETHOD(miibus_writereg, sf_miibus_writereg),
222 DEVMETHOD(miibus_statchg, sf_miibus_statchg),
223
204 { 0, 0 }
224 { NULL, NULL }
225};
226
227static driver_t sf_driver = {
228 "sf",
229 sf_methods,
230 sizeof(struct sf_softc),
231};
232
233static devclass_t sf_devclass;
234
235DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0);
236DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0);
237
238#define SF_SETBIT(sc, reg, x) \
239 csr_write_4(sc, reg, csr_read_4(sc, reg) | (x))
240
241#define SF_CLRBIT(sc, reg, x) \
242 csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x))
243
224static u_int32_t
225csr_read_4(sc, reg)
226 struct sf_softc *sc;
227 int reg;
244static uint32_t
245csr_read_4(struct sf_softc *sc, int reg)
228{
246{
229 u_int32_t val;
247 uint32_t val;
230
248
231#ifdef SF_USEIOSPACE
232 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
233 val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
234#else
235 val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE));
236#endif
249 if (sc->sf_restype == SYS_RES_MEMORY)
250 val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE));
251 else {
252 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
253 val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
254 }
237
255
238 return(val);
256 return (val);
239}
240
257}
258
241static u_int8_t
242sf_read_eeprom(sc, reg)
243 struct sf_softc *sc;
244 int reg;
259static uint8_t
260sf_read_eeprom(struct sf_softc *sc, int reg)
245{
261{
246 u_int8_t val;
262 uint8_t val;
247
248 val = (csr_read_4(sc, SF_EEADDR_BASE +
249 (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF;
250
251 return(val);
267 return (val);
252}
253
254static void
268}
269
270static void
255csr_write_4(sc, reg, val)
256 struct sf_softc *sc;
257 int reg;
258 u_int32_t val;
271csr_write_4(struct sf_softc *sc, int reg, uint32_t val)
259{
272{
260#ifdef SF_USEIOSPACE
261 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
262 CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val);
263#else
264 CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val);
265#endif
273
274 if (sc->sf_restype == SYS_RES_MEMORY)
275 CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val);
276 else {
277 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
278 CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val);
279 }
280}
281
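/*
 * Editor's note: the two routines above implement a classic
 * address/data indirection.  Only a small window of the chip is
 * decoded in I/O space, so each register access there costs two bus
 * cycles, while a memory-mapped BAR reaches the register file
 * directly; that is why sf_attach() below prefers SYS_RES_MEMORY.
 * The I/O-space pattern, in isolation:
 */
#if 0
	CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
	val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);  /* or _WRITE_4 to store */
#endif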
282/*
283 * Copy the address 'mac' into the perfect RX filter entry at
284 * offset 'idx.' The perfect filter only has 16 entries so do
285 * some sanity tests.
286 */
287static int
274sf_setperf(sc, idx, mac)
275 struct sf_softc *sc;
276 int idx;
277 caddr_t mac;
288sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac)
278{
289{
279 u_int16_t *p;
280
281 if (idx < 0 || idx > SF_RXFILT_PERFECT_CNT)
290
291 if (idx < 0 || idx > SF_RXFILT_PERFECT_CNT)
282 return(EINVAL);
292 return (EINVAL);
283
284 if (mac == NULL)
293
294 if (mac == NULL)
285 return(EINVAL);
295 return (EINVAL);
286
296
287 p = (u_int16_t *)mac;
288
289 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
297 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
290 (idx * SF_RXFILT_PERFECT_SKIP), htons(p[2]));
298 (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8));
291 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
299 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
292 (idx * SF_RXFILT_PERFECT_SKIP) + 4, htons(p[1]));
300 (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8));
293 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
301 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
294 (idx * SF_RXFILT_PERFECT_SKIP) + 8, htons(p[0]));
302 (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8));
295
303
296 return(0);
304 return (0);
297}
298
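/*
 * Editor's worked example of the packing above: for the address
 * 00:a0:c9:12:34:56 the three 32-bit writes carry 0x3456
 * (mac[5] | mac[4] << 8), 0xc912 (mac[3] | mac[2] << 8) and 0x00a0
 * (mac[1] | mac[0] << 8); the address is loaded low-order word
 * first, and building the words from bytes makes the new code
 * byte-order independent, unlike the htons() casts it replaces.
 */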
299/*
300 * Set the bit in the 512-bit hash table that corresponds to the
301 * specified mac address 'mac.' If 'prio' is nonzero, update the
302 * priority hash table instead of the filter hash table.
303 */
304static int
305sf_sethash(sc, mac, prio)
306 struct sf_softc *sc;
307 caddr_t mac;
308 int prio;
313sf_sethash(struct sf_softc *sc, caddr_t mac, int prio)
309{
314{
310 u_int32_t h;
315 uint32_t h;
311
312 if (mac == NULL)
316
317 if (mac == NULL)
313 return(EINVAL);
318 return (EINVAL);
314
315 h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23;
316
317 if (prio) {
318 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF +
319 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
320 } else {
321 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF +
322 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
323 }
324
325 return(0);
330 return (0);
326}
327
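/*
 * Editor's sketch of the index math above: ether_crc32_be() >> 23
 * leaves a 9-bit value h in [0, 511].  h >> 4 selects one of 32
 * rows spaced SF_RXFILT_HASH_SKIP apart and h & 0xF one of 16 bits
 * within the row, so every bit of the 512-bit table is reachable:
 */
#if 0
	row = h >> 4;	/* 0..31: which hash word */
	bit = h & 0xF;	/* 0..15: which bit in that word */
#endif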
328#ifdef notdef
329/*
330 * Set a VLAN tag in the receive filter.
331 */
332static int
333sf_setvlan(sc, idx, vlan)
334 struct sf_softc *sc;
335 int idx;
336 u_int32_t vlan;
338sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan)
337{
339{
340
338	if (idx < 0 || idx > SF_RXFILT_HASH_CNT)
341	if (idx < 0 || idx > SF_RXFILT_HASH_CNT)
339 return(EINVAL);
342 return (EINVAL);
340
341 csr_write_4(sc, SF_RXFILT_HASH_BASE +
342 (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan);
343
344 return(0);
347 return (0);
345}
346#endif
347
348static int
349sf_miibus_readreg(dev, phy, reg)
350 device_t dev;
351 int phy, reg;
352sf_miibus_readreg(device_t dev, int phy, int reg)
352{
353 struct sf_softc *sc;
354 int i;
355 u_int32_t val = 0;
356 uint32_t val = 0;
356
357 sc = device_get_softc(dev);
358
359 for (i = 0; i < SF_TIMEOUT; i++) {
360 val = csr_read_4(sc, SF_PHY_REG(phy, reg));
361 if (val & SF_MII_DATAVALID)
362 if ((val & SF_MII_DATAVALID) != 0)
362 break;
363 }
364
365 if (i == SF_TIMEOUT)
366 return(0);
367 return (0);
367
368
368 if ((val & 0x0000FFFF) == 0xFFFF)
369 return(0);
369 val &= SF_MII_DATAPORT;
370 if (val == 0xffff)
371 return (0);
370
372
371 return(val & 0x0000FFFF);
373 return (val);
372}
373
374static int
375sf_miibus_writereg(dev, phy, reg, val)
376 device_t dev;
377 int phy, reg, val;
377sf_miibus_writereg(device_t dev, int phy, int reg, int val)
378{
379 struct sf_softc *sc;
380 int i;
381 int busy;
382
383 sc = device_get_softc(dev);
384
385 csr_write_4(sc, SF_PHY_REG(phy, reg), val);
386
387 for (i = 0; i < SF_TIMEOUT; i++) {
388 busy = csr_read_4(sc, SF_PHY_REG(phy, reg));
389 if (!(busy & SF_MII_BUSY))
389 if ((busy & SF_MII_BUSY) == 0)
390 break;
391 }
392
393 return(0);
393 return (0);
394}
395
396static void
397sf_miibus_statchg(dev)
398 device_t dev;
397sf_miibus_statchg(device_t dev)
399{
400 struct sf_softc *sc;
398{
399 struct sf_softc *sc;
401 struct mii_data *mii;
402
403 sc = device_get_softc(dev);
400
401 sc = device_get_softc(dev);
402 taskqueue_enqueue(taskqueue_swi, &sc->sf_link_task);
403}
404
405static void
406sf_link_task(void *arg, int pending)
407{
408 struct sf_softc *sc;
409 struct mii_data *mii;
410 struct ifnet *ifp;
411 uint32_t val;
412
413 sc = (struct sf_softc *)arg;
414
415 SF_LOCK(sc);
416
404 mii = device_get_softc(sc->sf_miibus);
417 mii = device_get_softc(sc->sf_miibus);
418 ifp = sc->sf_ifp;
419 if (mii == NULL || ifp == NULL ||
420 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
421 SF_UNLOCK(sc);
422 return;
423 }
405
424
406 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
407 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_FULLDUPLEX);
425 if (mii->mii_media_status & IFM_ACTIVE) {
426 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
427 sc->sf_link = 1;
428 } else
429 sc->sf_link = 0;
430
431 val = csr_read_4(sc, SF_MACCFG_1);
432 val &= ~SF_MACCFG1_FULLDUPLEX;
433 val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB);
434 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
435 val |= SF_MACCFG1_FULLDUPLEX;
408 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX);
436 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX);
409 } else {
410 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_FULLDUPLEX);
437#ifdef notyet
438 /* Configure flow-control bits. */
439	if ((IFM_OPTIONS(mii->mii_media_active) &
440 IFM_ETH_RXPAUSE) != 0)
441 val |= SF_MACCFG1_RX_FLOWENB;
442	if ((IFM_OPTIONS(mii->mii_media_active) &
443 IFM_ETH_TXPAUSE) != 0)
444 val |= SF_MACCFG1_TX_FLOWENB;
445#endif
446 } else
411 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX);
447 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX);
412 }
448
449	/* Reset the MAC so the changes take effect. */
450 csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET);
451 DELAY(1000);
452 csr_write_4(sc, SF_MACCFG_1, val);
453
454 val = csr_read_4(sc, SF_TIMER_CTL);
455 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
456 val |= SF_TIMER_TIMES_TEN;
457 else
458 val &= ~SF_TIMER_TIMES_TEN;
459 csr_write_4(sc, SF_TIMER_CTL, val);
460
461 SF_UNLOCK(sc);
462}
463
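/*
 * Editor's note: sf_miibus_statchg() above now only enqueues
 * sf_link_task() on taskqueue_swi; the actual MAC reprogramming,
 * with its SF_LOCK acquisition and DELAY(1000) busy-wait, runs in
 * task context instead of directly in the MII callback path.
 */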
464static void
416sf_setmulti(sc)
417 struct sf_softc *sc;
465sf_rxfilter(struct sf_softc *sc)
418{
419 struct ifnet *ifp;
420 int i;
421 struct ifmultiaddr *ifma;
466{
467 struct ifnet *ifp;
468 int i;
469 struct ifmultiaddr *ifma;
422 u_int8_t dummy[] = { 0, 0, 0, 0, 0, 0 };
470 uint8_t dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
471 uint32_t rxfilt;
423
424 ifp = sc->sf_ifp;
425
426 /* First zot all the existing filters. */
427 for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++)
472
473 ifp = sc->sf_ifp;
474
475 /* First zot all the existing filters. */
476 for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++)
428 sf_setperf(sc, i, (char *)&dummy);
429 for (i = SF_RXFILT_HASH_BASE;
430 i < (SF_RXFILT_HASH_MAX + 1); i += 4)
477 sf_setperf(sc, i, dummy);
478 for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1);
479 i += sizeof(uint32_t))
431 csr_write_4(sc, i, 0);
480 csr_write_4(sc, i, 0);
432 SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_ALLMULTI);
433
481
434 /* Now program new ones. */
435 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
436 SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_ALLMULTI);
437 } else {
438 i = 1;
439 IF_ADDR_LOCK(ifp);
440 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
441 if (ifma->ifma_addr->sa_family != AF_LINK)
442 continue;
443 /*
444 * Program the first 15 multicast groups
445 * into the perfect filter. For all others,
446 * use the hash table.
447 */
448 if (i < SF_RXFILT_PERFECT_CNT) {
449 sf_setperf(sc, i,
450 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
451 i++;
452 continue;
453 }
482 rxfilt = csr_read_4(sc, SF_RXFILT);
483 rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD);
484 if ((ifp->if_flags & IFF_BROADCAST) != 0)
485 rxfilt |= SF_RXFILT_BROAD;
486 if ((ifp->if_flags & IFF_ALLMULTI) != 0 ||
487 (ifp->if_flags & IFF_PROMISC) != 0) {
488 if ((ifp->if_flags & IFF_PROMISC) != 0)
489 rxfilt |= SF_RXFILT_PROMISC;
490 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
491 rxfilt |= SF_RXFILT_ALLMULTI;
492 goto done;
493 }
454
494
455 sf_sethash(sc,
456 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0);
495 /* Now program new ones. */
496 i = 1;
497 IF_ADDR_LOCK(ifp);
498 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
499 ifma_link) {
500 if (ifma->ifma_addr->sa_family != AF_LINK)
501 continue;
502 /*
503 * Program the first 15 multicast groups
504 * into the perfect filter. For all others,
505 * use the hash table.
506 */
507 if (i < SF_RXFILT_PERFECT_CNT) {
508 sf_setperf(sc, i,
509 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
510 i++;
511 continue;
457 }
512 }
458 IF_ADDR_UNLOCK(ifp);
513
514 sf_sethash(sc,
515 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0);
459 }
516 }
517 IF_ADDR_UNLOCK(ifp);
518
519done:
520 csr_write_4(sc, SF_RXFILT, rxfilt);
521}
522
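/*
 * Editor's note: both loops in sf_rxfilter() above start at slot 1;
 * perfect-filter entry 0 is presumably reserved for the station
 * address programmed at initialization time.  The first 15
 * multicast groups land in the remaining perfect slots and the
 * rest overflow into the 512-bit hash table via sf_sethash().
 */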
523/*
524 * Set media options.
525 */
526static int
466sf_ifmedia_upd(ifp)
467 struct ifnet *ifp;
527sf_ifmedia_upd(struct ifnet *ifp)
468{
469 struct sf_softc *sc;
528{
529 struct sf_softc *sc;
530 struct mii_data *mii;
531 int error;
532
533 sc = ifp->if_softc;
534 SF_LOCK(sc);
473 sf_ifmedia_upd_locked(ifp);
474 SF_UNLOCK(sc);
475
535
476 return(0);
477}
478
479static void
480sf_ifmedia_upd_locked(ifp)
481 struct ifnet *ifp;
482{
483 struct sf_softc *sc;
484 struct mii_data *mii;
485
486 sc = ifp->if_softc;
487 mii = device_get_softc(sc->sf_miibus);
536 mii = device_get_softc(sc->sf_miibus);
488 SF_LOCK_ASSERT(sc);
489 sc->sf_link = 0;
537 if (mii->mii_instance) {
538 struct mii_softc *miisc;
539 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
540 mii_phy_reset(miisc);
541 }
495 mii_mediachg(mii);
542 error = mii_mediachg(mii);
543 SF_UNLOCK(sc);
544
545 return (error);
496}
497
498/*
499 * Report current media status.
500 */
501static void
546}
547
548/*
549 * Report current media status.
550 */
551static void
502sf_ifmedia_sts(ifp, ifmr)
503 struct ifnet *ifp;
504 struct ifmediareq *ifmr;
552sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
505{
506 struct sf_softc *sc;
507 struct mii_data *mii;
508
509 sc = ifp->if_softc;
510 SF_LOCK(sc);
511 mii = device_get_softc(sc->sf_miibus);
512
513 mii_pollstat(mii);
514 ifmr->ifm_active = mii->mii_media_active;
515 ifmr->ifm_status = mii->mii_media_status;
516 SF_UNLOCK(sc);
517}
518
519static int
520sf_ioctl(ifp, command, data)
521 struct ifnet *ifp;
522 u_long command;
523 caddr_t data;
568sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
524{
569{
525 struct sf_softc *sc = ifp->if_softc;
526 struct ifreq *ifr = (struct ifreq *) data;
570 struct sf_softc *sc;
571 struct ifreq *ifr;
527 struct mii_data *mii;
572 struct mii_data *mii;
528 int error = 0;
573 int error, mask;
529
574
530 switch(command) {
575 sc = ifp->if_softc;
576 ifr = (struct ifreq *)data;
577 error = 0;
578
579 switch (command) {
531 case SIOCSIFFLAGS:
532 SF_LOCK(sc);
533 if (ifp->if_flags & IFF_UP) {
534 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
535 ifp->if_flags & IFF_PROMISC &&
536 !(sc->sf_if_flags & IFF_PROMISC)) {
537 SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC);
538 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
539 !(ifp->if_flags & IFF_PROMISC) &&
540 sc->sf_if_flags & IFF_PROMISC) {
541 SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC);
542 } else if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
543 sf_init_locked(sc);
583 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
584 if ((ifp->if_flags ^ sc->sf_if_flags) &
585 (IFF_PROMISC | IFF_ALLMULTI))
586 sf_rxfilter(sc);
587 } else {
588 if (sc->sf_detach == 0)
589 sf_init_locked(sc);
590 }
544 } else {
591 } else {
545 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
592 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
546 sf_stop(sc);
547 }
548 sc->sf_if_flags = ifp->if_flags;
549 SF_UNLOCK(sc);
550 error = 0;
551 break;
552 case SIOCADDMULTI:
553 case SIOCDELMULTI:
554 SF_LOCK(sc);
555 sf_setmulti(sc);
601 sf_rxfilter(sc);
556 SF_UNLOCK(sc);
602 SF_UNLOCK(sc);
557 error = 0;
603 break;
604 case SIOCGIFMEDIA:
605 case SIOCSIFMEDIA:
606 mii = device_get_softc(sc->sf_miibus);
607 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
608 break;
609 case SIOCSIFCAP:
610 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
565#ifdef DEVICE_POLLING
611#ifdef DEVICE_POLLING
566 if (ifr->ifr_reqcap & IFCAP_POLLING &&
567 !(ifp->if_capenable & IFCAP_POLLING)) {
568 error = ether_poll_register(sf_poll, ifp);
569 if (error)
570 return(error);
571 SF_LOCK(sc);
572 /* Disable interrupts */
573 csr_write_4(sc, SF_IMR, 0x00000000);
574 ifp->if_capenable |= IFCAP_POLLING;
575 SF_UNLOCK(sc);
576 return (error);
577
612 if ((mask & IFCAP_POLLING) != 0) {
613 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
614 error = ether_poll_register(sf_poll, ifp);
615 if (error != 0)
616 break;
617 SF_LOCK(sc);
618 /* Disable interrupts. */
619 csr_write_4(sc, SF_IMR, 0);
620 ifp->if_capenable |= IFCAP_POLLING;
621 SF_UNLOCK(sc);
622 } else {
623 error = ether_poll_deregister(ifp);
624 /* Enable interrupts. */
625 SF_LOCK(sc);
626 csr_write_4(sc, SF_IMR, SF_INTRS);
627 ifp->if_capenable &= ~IFCAP_POLLING;
628 SF_UNLOCK(sc);
629 }
578 }
630 }
579 if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
580 ifp->if_capenable & IFCAP_POLLING) {
581 error = ether_poll_deregister(ifp);
582 /* Enable interrupts. */
583 SF_LOCK(sc);
584 csr_write_4(sc, SF_IMR, SF_INTRS);
585 ifp->if_capenable &= ~IFCAP_POLLING;
586 SF_UNLOCK(sc);
587 return (error);
588 }
589#endif /* DEVICE_POLLING */
631#endif /* DEVICE_POLLING */
632 if ((mask & IFCAP_TXCSUM) != 0) {
633 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
634 SF_LOCK(sc);
635 ifp->if_capenable ^= IFCAP_TXCSUM;
636 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) {
637 ifp->if_hwassist |= SF_CSUM_FEATURES;
638 SF_SETBIT(sc, SF_GEN_ETH_CTL,
639 SF_ETHCTL_TXGFP_ENB);
640 } else {
641 ifp->if_hwassist &= ~SF_CSUM_FEATURES;
642 SF_CLRBIT(sc, SF_GEN_ETH_CTL,
643 SF_ETHCTL_TXGFP_ENB);
644 }
645 SF_UNLOCK(sc);
646 }
647 }
648 if ((mask & IFCAP_RXCSUM) != 0) {
649 if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
650 SF_LOCK(sc);
651 ifp->if_capenable ^= IFCAP_RXCSUM;
652 if ((IFCAP_RXCSUM & ifp->if_capenable) != 0)
653 SF_SETBIT(sc, SF_GEN_ETH_CTL,
654 SF_ETHCTL_RXGFP_ENB);
655 else
656 SF_CLRBIT(sc, SF_GEN_ETH_CTL,
657 SF_ETHCTL_RXGFP_ENB);
658 SF_UNLOCK(sc);
659 }
660 }
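/*
 * Editor's note: the capability toggles above are driven by the
 * standard ifconfig(8) knobs, which arrive here as SIOCSIFCAP, e.g.:
 *
 *	ifconfig sf0 -txcsum	# clear IFCAP_TXCSUM, disable TxGFP
 *	ifconfig sf0 rxcsum	# set IFCAP_RXCSUM, enable RxGFP
 */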
661 break;
662 default:
663 error = ether_ioctl(ifp, command, data);
664 break;
665 }
666
596 return(error);
667 return (error);
597}
598
599static void
668}
669
670static void
600sf_reset(sc)
601 struct sf_softc *sc;
671sf_reset(struct sf_softc *sc)
602{
672{
603 register int i;
673 int i;
604
605 csr_write_4(sc, SF_GEN_ETH_CTL, 0);
606 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
607 DELAY(1000);
608 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
609
610 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET);
611

--- 12 unchanged lines hidden (view full) ---

624
625/*
626 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device
627 * IDs against our list and return a device name if we find a match.
628 * We also check the subsystem ID so that we can identify exactly which
629 * NIC has been found, if possible.
630 */
631static int
632sf_probe(dev)
633 device_t dev;
702sf_probe(device_t dev)
634{
635 struct sf_type *t;
703{
704 struct sf_type *t;
705 uint16_t vid;
706 uint16_t did;
707 uint16_t sdid;
708 int i;
636
709
637 t = sf_devs;
710 vid = pci_get_vendor(dev);
711 did = pci_get_device(dev);
712 sdid = pci_get_subdevice(dev);
638
713
639 while(t->sf_name != NULL) {
640 if ((pci_get_vendor(dev) == t->sf_vid) &&
641 (pci_get_device(dev) == t->sf_did)) {
642 switch((pci_read_config(dev,
643 SF_PCI_SUBVEN_ID, 4) >> 16) & 0xFFFF) {
644 case AD_SUBSYSID_62011_REV0:
645 case AD_SUBSYSID_62011_REV1:
646 device_set_desc(dev,
647 "Adaptec ANA-62011 10/100BaseTX");
714 t = sf_devs;
715 for (i = 0; i < sizeof(sf_devs) / sizeof(sf_devs[0]); i++, t++) {
716 if (vid == t->sf_vid && did == t->sf_did) {
717 if (sdid == t->sf_sdid) {
718 device_set_desc(dev, t->sf_sname);
648 return (BUS_PROBE_DEFAULT);
719 return (BUS_PROBE_DEFAULT);
649 case AD_SUBSYSID_62022:
650 device_set_desc(dev,
651 "Adaptec ANA-62022 10/100BaseTX");
652 return (BUS_PROBE_DEFAULT);
653 case AD_SUBSYSID_62044_REV0:
654 case AD_SUBSYSID_62044_REV1:
655 device_set_desc(dev,
656 "Adaptec ANA-62044 10/100BaseTX");
657 return (BUS_PROBE_DEFAULT);
658 case AD_SUBSYSID_62020:
659 device_set_desc(dev,
660 "Adaptec ANA-62020 10/100BaseFX");
661 return (BUS_PROBE_DEFAULT);
662 case AD_SUBSYSID_69011:
663 device_set_desc(dev,
664 "Adaptec ANA-69011 10/100BaseTX");
665 return (BUS_PROBE_DEFAULT);
666 default:
667 device_set_desc(dev, t->sf_name);
668 return (BUS_PROBE_DEFAULT);
669 break;
670 }
671 }
720 }
721 }
672 t++;
673 }
674
722 }
723
675 return(ENXIO);
724 if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) {
725	/* unknown subdevice */
726 device_set_desc(dev, sf_devs[0].sf_name);
727 return (BUS_PROBE_DEFAULT);
728 }
729
730 return (ENXIO);
731}
732
733/*
734 * Attach the interface. Allocate softc structures, do ifmedia
735 * setup and ethernet/BPF attach.
736 */
737static int
683sf_attach(dev)
684 device_t dev;
738sf_attach(device_t dev)
739{
740 int i;
741 struct sf_softc *sc;
742 struct ifnet *ifp;
743 uint32_t reg;
689 int rid, error = 0;
744 int rid, error = 0;
690 u_char eaddr[6];
745 uint8_t eaddr[ETHER_ADDR_LEN];
746
747 sc = device_get_softc(dev);
748 sc->sf_dev = dev;
749
750 mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
751 MTX_DEF);
752 callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0);
753 TASK_INIT(&sc->sf_link_task, 0, sf_link_task, sc);
754
755 /*
756 * Map control/status registers.
757 */
758 pci_enable_busmaster(dev);
759
702 rid = SF_RID;
703 sc->sf_res = bus_alloc_resource_any(dev, SF_RES, &rid, RF_ACTIVE);
704
760 /*
761 * Prefer memory space register mapping over I/O space as the
762	 * hardware requires many register accesses to fetch the various
763	 * producer/consumer indices during Tx/Rx operation. However, this
764	 * requires a large (512K) memory mapping to cover the entire
765	 * register space.
766 */
767 sc->sf_rid = PCIR_BAR(0);
768 sc->sf_restype = SYS_RES_MEMORY;
769 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid,
770 RF_ACTIVE);
705 if (sc->sf_res == NULL) {
771 if (sc->sf_res == NULL) {
706 device_printf(dev, "couldn't map ports\n");
707 error = ENXIO;
708 goto fail;
772 reg = pci_read_config(dev, PCIR_BAR(0), 4);
773 if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64)
774 sc->sf_rid = PCIR_BAR(2);
775 else
776 sc->sf_rid = PCIR_BAR(1);
777 sc->sf_restype = SYS_RES_IOPORT;
778 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype,
779 &sc->sf_rid, RF_ACTIVE);
780 if (sc->sf_res == NULL) {
781 device_printf(dev, "couldn't allocate resources\n");
782 mtx_destroy(&sc->sf_mtx);
783 return (ENXIO);
784 }
709 }
785 }
786 if (bootverbose)
787 device_printf(dev, "using %s space register mapping\n",
788 sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O");
710
789
711 sc->sf_btag = rman_get_bustag(sc->sf_res);
712 sc->sf_bhandle = rman_get_bushandle(sc->sf_res);
790 reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
791 if (reg == 0) {
792 /*
793	 * If the cache line size is 0, MWI is not used at all, so set a
794	 * reasonable default. The AIC-6915 supports 0, 4, 8, 16, 32
795 * and 64.
796 */
797 reg = 16;
798 device_printf(dev, "setting PCI cache line size to %u\n", reg);
799 pci_write_config(dev, PCIR_CACHELNSZ, reg, 1);
800 } else {
801 if (bootverbose)
802 device_printf(dev, "PCI cache line size : %u\n", reg);
803 }
804 /* Enable MWI. */
805 reg = pci_read_config(dev, PCIR_COMMAND, 2);
806 reg |= PCIM_CMD_MWRICEN;
807 pci_write_config(dev, PCIR_COMMAND, reg, 2);
713
808
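/*
 * Editor's note: PCI Memory Write and Invalidate transactions are
 * only usable when PCIR_CACHELNSZ holds a cache line size the
 * device supports, which is why the code above installs a sane
 * value before setting PCIM_CMD_MWRICEN in the command register.
 */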
714 /* Allocate interrupt */
809 /* Allocate interrupt. */
715 rid = 0;
716 sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
717 RF_SHAREABLE | RF_ACTIVE);
718
719 if (sc->sf_irq == NULL) {
720 device_printf(dev, "couldn't map interrupt\n");
721 error = ENXIO;
722 goto fail;
723 }
724
725 callout_init_mtx(&sc->sf_stat_callout, &sc->sf_mtx, 0);
820 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
821 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
822 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
823 sf_sysctl_stats, "I", "Statistics");
726
824
825 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
826 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
827 OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW,
828 &sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I",
829 "sf interrupt moderation");
830 /* Pull in device tunables. */
831 sc->sf_int_mod = SF_IM_DEFAULT;
832 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
833 "int_mod", &sc->sf_int_mod);
834 if (error == 0) {
835 if (sc->sf_int_mod < SF_IM_MIN ||
836 sc->sf_int_mod > SF_IM_MAX) {
837 device_printf(dev, "int_mod value out of range; "
838 "using default: %d\n", SF_IM_DEFAULT);
839 sc->sf_int_mod = SF_IM_DEFAULT;
840 }
841 }
842
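/*
 * Editor's note: resource_int_value(9) consults the kernel
 * environment, so the moderation tunable above can be preset at
 * boot, e.g. in /boot/device.hints (unit 0 shown; the value must
 * fall within SF_IM_MIN..SF_IM_MAX):
 *
 *	hint.sf.0.int_mod="<value>"
 */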
727 /* Reset the adapter. */
728 sf_reset(sc);
729
730 /*
731 * Get station address from the EEPROM.
732 */
733 for (i = 0; i < ETHER_ADDR_LEN; i++)
734 eaddr[i] =
735 sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i);
736
737 /* Allocate the descriptor queues. */
738 sc->sf_ldata = contigmalloc(sizeof(struct sf_list_data), M_DEVBUF,
739 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
740
741 if (sc->sf_ldata == NULL) {
742 device_printf(dev, "no memory for list buffers!\n");
743 error = ENXIO;
853 /* Allocate DMA resources. */
854 if (sf_dma_alloc(sc) != 0) {
855 error = ENOSPC;
744 goto fail;
745 }
746
856 goto fail;
857 }
858
747 bzero(sc->sf_ldata, sizeof(struct sf_list_data));
859 sc->sf_txthresh = SF_MIN_TX_THRESHOLD;
748
749 ifp = sc->sf_ifp = if_alloc(IFT_ETHER);
750 if (ifp == NULL) {
751 device_printf(dev, "can not if_alloc()\n");
863 device_printf(dev, "can not allocate ifnet structure\n");
752 error = ENOSPC;
753 goto fail;
754 }
755
756 /* Do MII setup. */
757 if (mii_phy_probe(dev, &sc->sf_miibus,
758 sf_ifmedia_upd, sf_ifmedia_sts)) {
869 if (mii_phy_probe(dev, &sc->sf_miibus, sf_ifmedia_upd,
870 sf_ifmedia_sts)) {
759 device_printf(dev, "MII without any phy!\n");
760 error = ENXIO;
761 goto fail;
762 }
763
764 ifp->if_softc = sc;
765 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
871 device_printf(dev, "MII without any phy!\n");
872 error = ENXIO;
873 goto fail;
874 }
875
876 ifp->if_softc = sc;
877 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
766 ifp->if_mtu = ETHERMTU;
767 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
768 ifp->if_ioctl = sf_ioctl;
769 ifp->if_start = sf_start;
770 ifp->if_watchdog = sf_watchdog;
771 ifp->if_init = sf_init;
772 IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1);
773 ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1;
774 IFQ_SET_READY(&ifp->if_snd);
885 /*
886 * With the help of firmware, AIC-6915 supports
887 * Tx/Rx TCP/UDP checksum offload.
888 */
889 ifp->if_hwassist = SF_CSUM_FEATURES;
890 ifp->if_capabilities = IFCAP_HWCSUM;
891
892 /*
893 * Call MI attach routine.
894 */
895 ether_ifattach(ifp, eaddr);
896
897 /* VLAN capability setup. */
898 ifp->if_capabilities |= IFCAP_VLAN_MTU;
899 ifp->if_capenable = ifp->if_capabilities;
900#ifdef DEVICE_POLLING
901 ifp->if_capabilities |= IFCAP_POLLING;
902#endif
779
780 /*
903 /*
781 * Call MI attach routine.
904 * Tell the upper layer(s) we support long frames.
905 * Must appear after the call to ether_ifattach() because
906 * ether_ifattach() sets ifi_hdrlen to the default value.
782 */
907 */
783 ether_ifattach(ifp, eaddr);
908 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
784
785 /* Hook interrupt last to avoid having to lock softc */
786 error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE,
787 NULL, sf_intr, sc, &sc->sf_intrhand);
788
789 if (error) {
790 device_printf(dev, "couldn't set up irq\n");
791 ether_ifdetach(ifp);
792 goto fail;
793 }
794
795fail:
796 if (error)
797 sf_detach(dev);
798
799 return(error);
924 return (error);
925}
926
927/*
928 * Shutdown hardware and free up resources. This can be called any
929 * time after the mutex has been initialized. It is called in both
930 * the error case in attach and the normal detach case so it needs
931 * to be careful about only freeing resources that have actually been
932 * allocated.
933 */
934static int
810sf_detach(dev)
811 device_t dev;
935sf_detach(device_t dev)
936{
937 struct sf_softc *sc;
938 struct ifnet *ifp;
939
940 sc = device_get_softc(dev);
817 KASSERT(mtx_initialized(&sc->sf_mtx), ("sf mutex not initialized"));
941 ifp = sc->sf_ifp;
942
943#ifdef DEVICE_POLLING
821 if (ifp->if_capenable & IFCAP_POLLING)
944 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
822 ether_poll_deregister(ifp);
823#endif
945 ether_poll_deregister(ifp);
946#endif
824
947
948 /* These should only be active if attach succeeded */
949 if (device_is_attached(dev)) {
950 SF_LOCK(sc);
951 sc->sf_detach = 1;
828 sf_stop(sc);
829 SF_UNLOCK(sc);
952 sf_stop(sc);
953 SF_UNLOCK(sc);
830 callout_drain(&sc->sf_stat_callout);
831 ether_ifdetach(ifp);
954 callout_drain(&sc->sf_co);
955 taskqueue_drain(taskqueue_swi, &sc->sf_link_task);
956 if (ifp != NULL)
957 ether_ifdetach(ifp);
832 }
958 }
833 if (sc->sf_miibus)
959 if (sc->sf_miibus) {
834 device_delete_child(dev, sc->sf_miibus);
960 device_delete_child(dev, sc->sf_miibus);
961 sc->sf_miibus = NULL;
962 }
835 bus_generic_detach(dev);
836
963 bus_generic_detach(dev);
964
837 if (sc->sf_intrhand)
965 if (sc->sf_intrhand != NULL)
838 bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand);
966 bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand);
839 if (sc->sf_irq)
967 if (sc->sf_irq != NULL)
840 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq);
968 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq);
841 if (sc->sf_res)
842 bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res);
969 if (sc->sf_res != NULL)
970 bus_release_resource(dev, sc->sf_restype, sc->sf_rid,
971 sc->sf_res);
843
972
844 if (ifp)
973 sf_dma_free(sc);
974 if (ifp != NULL)
845 if_free(ifp);
846
975 if_free(ifp);
976
847 if (sc->sf_ldata)
848 contigfree(sc->sf_ldata, sizeof(struct sf_list_data), M_DEVBUF);
849
850 mtx_destroy(&sc->sf_mtx);
851
977 mtx_destroy(&sc->sf_mtx);
978
852 return(0);
979 return (0);
853}
854
980}
981
982struct sf_dmamap_arg {
983 bus_addr_t sf_busaddr;
984};
985
986static void
987sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
988{
989 struct sf_dmamap_arg *ctx;
990
991 if (error != 0)
992 return;
993 ctx = arg;
994 ctx->sf_busaddr = segs[0].ds_addr;
995}
996
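/*
 * Editor's note: the callback above records the bus address of the
 * single segment each ring maps to.  Callers in sf_dma_alloc()
 * below zero ctx.sf_busaddr before bus_dmamap_load() and treat a
 * still-zero result as a load failure, which also covers the
 * error != 0 early return here.
 */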
855static int
997static int
856sf_init_rx_ring(sc)
857 struct sf_softc *sc;
998sf_dma_alloc(struct sf_softc *sc)
858{
999{
859 struct sf_list_data *ld;
860 int i;
1000 struct sf_dmamap_arg ctx;
1001 struct sf_txdesc *txd;
1002 struct sf_rxdesc *rxd;
1003 bus_addr_t lowaddr;
1004 bus_addr_t rx_ring_end, rx_cring_end;
1005 bus_addr_t tx_ring_end, tx_cring_end;
1006 int error, i;
861
1007
862 ld = sc->sf_ldata;
1008 lowaddr = BUS_SPACE_MAXADDR;
863
1009
864 bzero((char *)ld->sf_rx_dlist_big,
865 sizeof(struct sf_rx_bufdesc_type0) * SF_RX_DLIST_CNT);
866 bzero((char *)ld->sf_rx_clist,
867 sizeof(struct sf_rx_cmpdesc_type3) * SF_RX_CLIST_CNT);
1010again:
1011 /* Create parent DMA tag. */
1012 error = bus_dma_tag_create(
1013 bus_get_dma_tag(sc->sf_dev), /* parent */
1014 1, 0, /* alignment, boundary */
1015 lowaddr, /* lowaddr */
1016 BUS_SPACE_MAXADDR, /* highaddr */
1017 NULL, NULL, /* filter, filterarg */
1018 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1019 0, /* nsegments */
1020 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1021 0, /* flags */
1022 NULL, NULL, /* lockfunc, lockarg */
1023 &sc->sf_cdata.sf_parent_tag);
1024 if (error != 0) {
1025 device_printf(sc->sf_dev, "failed to create parent DMA tag\n");
1026 goto fail;
1027 }
1028 /* Create tag for Tx ring. */
1029 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1030 SF_RING_ALIGN, 0, /* alignment, boundary */
1031 BUS_SPACE_MAXADDR, /* lowaddr */
1032 BUS_SPACE_MAXADDR, /* highaddr */
1033 NULL, NULL, /* filter, filterarg */
1034 SF_TX_DLIST_SIZE, /* maxsize */
1035 1, /* nsegments */
1036 SF_TX_DLIST_SIZE, /* maxsegsize */
1037 0, /* flags */
1038 NULL, NULL, /* lockfunc, lockarg */
1039 &sc->sf_cdata.sf_tx_ring_tag);
1040 if (error != 0) {
1041 device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n");
1042 goto fail;
1043 }
868
1044
1045 /* Create tag for Tx completion ring. */
1046 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1047 SF_RING_ALIGN, 0, /* alignment, boundary */
1048 BUS_SPACE_MAXADDR, /* lowaddr */
1049 BUS_SPACE_MAXADDR, /* highaddr */
1050 NULL, NULL, /* filter, filterarg */
1051 SF_TX_CLIST_SIZE, /* maxsize */
1052 1, /* nsegments */
1053 SF_TX_CLIST_SIZE, /* maxsegsize */
1054 0, /* flags */
1055 NULL, NULL, /* lockfunc, lockarg */
1056 &sc->sf_cdata.sf_tx_cring_tag);
1057 if (error != 0) {
1058 device_printf(sc->sf_dev,
1059 "failed to create Tx completion ring DMA tag\n");
1060 goto fail;
1061 }
1062
1063 /* Create tag for Rx ring. */
1064 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1065 SF_RING_ALIGN, 0, /* alignment, boundary */
1066 BUS_SPACE_MAXADDR, /* lowaddr */
1067 BUS_SPACE_MAXADDR, /* highaddr */
1068 NULL, NULL, /* filter, filterarg */
1069 SF_RX_DLIST_SIZE, /* maxsize */
1070 1, /* nsegments */
1071 SF_RX_DLIST_SIZE, /* maxsegsize */
1072 0, /* flags */
1073 NULL, NULL, /* lockfunc, lockarg */
1074 &sc->sf_cdata.sf_rx_ring_tag);
1075 if (error != 0) {
1076 device_printf(sc->sf_dev,
1077 "failed to create Rx ring DMA tag\n");
1078 goto fail;
1079 }
1080
1081 /* Create tag for Rx completion ring. */
1082 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1083 SF_RING_ALIGN, 0, /* alignment, boundary */
1084 BUS_SPACE_MAXADDR, /* lowaddr */
1085 BUS_SPACE_MAXADDR, /* highaddr */
1086 NULL, NULL, /* filter, filterarg */
1087 SF_RX_CLIST_SIZE, /* maxsize */
1088 1, /* nsegments */
1089 SF_RX_CLIST_SIZE, /* maxsegsize */
1090 0, /* flags */
1091 NULL, NULL, /* lockfunc, lockarg */
1092 &sc->sf_cdata.sf_rx_cring_tag);
1093 if (error != 0) {
1094 device_printf(sc->sf_dev,
1095 "failed to create Rx completion ring DMA tag\n");
1096 goto fail;
1097 }
1098
1099 /* Create tag for Tx buffers. */
1100 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1101 1, 0, /* alignment, boundary */
1102 BUS_SPACE_MAXADDR, /* lowaddr */
1103 BUS_SPACE_MAXADDR, /* highaddr */
1104 NULL, NULL, /* filter, filterarg */
1105 MCLBYTES * SF_MAXTXSEGS, /* maxsize */
1106 SF_MAXTXSEGS, /* nsegments */
1107 MCLBYTES, /* maxsegsize */
1108 0, /* flags */
1109 NULL, NULL, /* lockfunc, lockarg */
1110 &sc->sf_cdata.sf_tx_tag);
1111 if (error != 0) {
1112 device_printf(sc->sf_dev, "failed to create Tx DMA tag\n");
1113 goto fail;
1114 }
1115
1116 /* Create tag for Rx buffers. */
1117 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1118 SF_RX_ALIGN, 0, /* alignment, boundary */
1119 BUS_SPACE_MAXADDR, /* lowaddr */
1120 BUS_SPACE_MAXADDR, /* highaddr */
1121 NULL, NULL, /* filter, filterarg */
1122 MCLBYTES, /* maxsize */
1123 1, /* nsegments */
1124 MCLBYTES, /* maxsegsize */
1125 0, /* flags */
1126 NULL, NULL, /* lockfunc, lockarg */
1127 &sc->sf_cdata.sf_rx_tag);
1128 if (error != 0) {
1129 device_printf(sc->sf_dev, "failed to create Rx DMA tag\n");
1130 goto fail;
1131 }
1132
1133 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1134 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag,
1135 (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK |
1136 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map);
1137 if (error != 0) {
1138 device_printf(sc->sf_dev,
1139 "failed to allocate DMA'able memory for Tx ring\n");
1140 goto fail;
1141 }
1142
1143 ctx.sf_busaddr = 0;
1144 error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag,
1145 sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring,
1146 SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1147 if (error != 0 || ctx.sf_busaddr == 0) {
1148 device_printf(sc->sf_dev,
1149 "failed to load DMA'able memory for Tx ring\n");
1150 goto fail;
1151 }
1152 sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr;
1153
1154 /*
1155 * Allocate DMA'able memory and load the DMA map for Tx completion ring.
1156 */
1157 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag,
1158 (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK |
1159 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map);
1160 if (error != 0) {
1161 device_printf(sc->sf_dev,
1162 "failed to allocate DMA'able memory for "
1163 "Tx completion ring\n");
1164 goto fail;
1165 }
1166
1167 ctx.sf_busaddr = 0;
1168 error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag,
1169 sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring,
1170 SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1171 if (error != 0 || ctx.sf_busaddr == 0) {
1172 device_printf(sc->sf_dev,
1173 "failed to load DMA'able memory for Tx completion ring\n");
1174 goto fail;
1175 }
1176 sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr;
1177
1178 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1179 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag,
1180 (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK |
1181 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map);
1182 if (error != 0) {
1183 device_printf(sc->sf_dev,
1184 "failed to allocate DMA'able memory for Rx ring\n");
1185 goto fail;
1186 }
1187
1188 ctx.sf_busaddr = 0;
1189 error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag,
1190 sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring,
1191 SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1192 if (error != 0 || ctx.sf_busaddr == 0) {
1193 device_printf(sc->sf_dev,
1194 "failed to load DMA'able memory for Rx ring\n");
1195 goto fail;
1196 }
1197 sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr;
1198
1199 /*
1200 * Allocate DMA'able memory and load the DMA map for Rx completion ring.
1201 */
1202 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag,
1203 (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK |
1204 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map);
1205 if (error != 0) {
1206 device_printf(sc->sf_dev,
1207 "failed to allocate DMA'able memory for "
1208 "Rx completion ring\n");
1209 goto fail;
1210 }
1211
1212 ctx.sf_busaddr = 0;
1213 error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag,
1214 sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring,
1215 SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1216 if (error != 0 || ctx.sf_busaddr == 0) {
1217 device_printf(sc->sf_dev,
1218 "failed to load DMA'able memory for Rx completion ring\n");
1219 goto fail;
1220 }
1221 sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr;
1222
1223 /*
1224	 * Tx descriptor ring and Tx completion ring should be addressed in
1225	 * the same 4GB space. The same rule applies to the Rx ring and Rx
1226	 * completion ring. Unfortunately there is no way to specify this
1227	 * boundary restriction with bus_dma(9), so just try to allocate
1228	 * without the restriction and check that the restriction was
1229	 * satisfied. If not, fall back to 32bit DMA addressing mode, which
1230	 * always guarantees the restriction.
1231 */
1232 tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE;
1233 tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE;
1234 rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE;
1235 rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE;
1236 if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) !=
1237 SF_ADDR_HI(tx_cring_end)) ||
1238 (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) !=
1239 SF_ADDR_HI(tx_ring_end)) ||
1240 (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) !=
1241 SF_ADDR_HI(rx_cring_end)) ||
1242 (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) !=
1243 SF_ADDR_HI(rx_ring_end))) {
1244 device_printf(sc->sf_dev,
1245 "switching to 32bit DMA mode\n");
1246 sf_dma_free(sc);
1247 /* Limit DMA address space to 32bit and try again. */
1248 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1249 goto again;
1250 }
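/*
 * A standalone sketch of the 4GB-window test performed above. It assumes
 * SF_ADDR_HI() extracts the upper 32 bits of a bus address (an assumption;
 * the authoritative macro lives in the driver's header). Two rings share a
 * window only when the upper 32 bits of each ring's start match the upper
 * 32 bits of the other ring's end.
 */
#include <stdint.h>
#include <stdio.h>

#define SF_ADDR_HI(x)	((uint64_t)(x) >> 32)	/* assumed definition */

static int
same_4g_window(uint64_t ring, uint64_t ring_sz, uint64_t cring,
    uint64_t cring_sz)
{
	return (SF_ADDR_HI(ring) == SF_ADDR_HI(cring + cring_sz) &&
	    SF_ADDR_HI(cring) == SF_ADDR_HI(ring + ring_sz));
}

int
main(void)
{
	/* 0xfffff000 + 0x2000 crosses a 4GB boundary: test fails (0). */
	printf("%d\n", same_4g_window(0xfffff000ULL, 0x2000,
	    0xfffff000ULL, 0x2000));
	/* Both rings entirely below 4GB: test passes (1). */
	printf("%d\n", same_4g_window(0x100000ULL, 0x2000,
	    0x200000ULL, 0x1000));
	return (0);
}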
1251
1252 /* Create DMA maps for Tx buffers. */
1253 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1254 txd = &sc->sf_cdata.sf_txdesc[i];
1255 txd->tx_m = NULL;
1256 txd->ndesc = 0;
1257 txd->tx_dmamap = NULL;
1258 error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0,
1259 &txd->tx_dmamap);
1260 if (error != 0) {
1261 device_printf(sc->sf_dev,
1262 "failed to create Tx dmamap\n");
1263 goto fail;
1264 }
1265 }
1266 /* Create DMA maps for Rx buffers. */
1267 if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
1268 &sc->sf_cdata.sf_rx_sparemap)) != 0) {
1269 device_printf(sc->sf_dev,
1270 "failed to create spare Rx dmamap\n");
1271 goto fail;
1272 }
869 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1273 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
870 if (sf_newbuf(sc, &ld->sf_rx_dlist_big[i], NULL) == ENOBUFS)
871 return(ENOBUFS);
1274 rxd = &sc->sf_cdata.sf_rxdesc[i];
1275 rxd->rx_m = NULL;
1276 rxd->rx_dmamap = NULL;
1277 error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
1278 &rxd->rx_dmamap);
1279 if (error != 0) {
1280 device_printf(sc->sf_dev,
1281 "failed to create Rx dmamap\n");
1282 goto fail;
1283 }
872 }
873
1284 }
1285
874 return(0);
1286fail:
1287 return (error);
875}
876
877static void
1288}
1289
1290static void
878sf_init_tx_ring(sc)
879 struct sf_softc *sc;
1291sf_dma_free(struct sf_softc *sc)
880{
1292{
881 struct sf_list_data *ld;
1293 struct sf_txdesc *txd;
1294 struct sf_rxdesc *rxd;
882 int i;
883
1295 int i;
1296
884 ld = sc->sf_ldata;
1297 /* Tx ring. */
1298 if (sc->sf_cdata.sf_tx_ring_tag) {
1299 if (sc->sf_cdata.sf_tx_ring_map)
1300 bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag,
1301 sc->sf_cdata.sf_tx_ring_map);
1302 if (sc->sf_cdata.sf_tx_ring_map &&
1303 sc->sf_rdata.sf_tx_ring)
1304 bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag,
1305 sc->sf_rdata.sf_tx_ring,
1306 sc->sf_cdata.sf_tx_ring_map);
1307 sc->sf_rdata.sf_tx_ring = NULL;
1308 sc->sf_cdata.sf_tx_ring_map = NULL;
1309 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag);
1310 sc->sf_cdata.sf_tx_ring_tag = NULL;
1311 }
1312 /* Tx completion ring. */
1313 if (sc->sf_cdata.sf_tx_cring_tag) {
1314 if (sc->sf_cdata.sf_tx_cring_map)
1315 bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag,
1316 sc->sf_cdata.sf_tx_cring_map);
1317 if (sc->sf_cdata.sf_tx_cring_map &&
1318 sc->sf_rdata.sf_tx_cring)
1319 bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag,
1320 sc->sf_rdata.sf_tx_cring,
1321 sc->sf_cdata.sf_tx_cring_map);
1322 sc->sf_rdata.sf_tx_cring = NULL;
1323 sc->sf_cdata.sf_tx_cring_map = NULL;
1324 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag);
1325 sc->sf_cdata.sf_tx_cring_tag = NULL;
1326 }
1327 /* Rx ring. */
1328 if (sc->sf_cdata.sf_rx_ring_tag) {
1329 if (sc->sf_cdata.sf_rx_ring_map)
1330 bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag,
1331 sc->sf_cdata.sf_rx_ring_map);
1332 if (sc->sf_cdata.sf_rx_ring_map &&
1333 sc->sf_rdata.sf_rx_ring)
1334 bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag,
1335 sc->sf_rdata.sf_rx_ring,
1336 sc->sf_cdata.sf_rx_ring_map);
1337 sc->sf_rdata.sf_rx_ring = NULL;
1338 sc->sf_cdata.sf_rx_ring_map = NULL;
1339 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag);
1340 sc->sf_cdata.sf_rx_ring_tag = NULL;
1341 }
1342 /* Rx completion ring. */
1343 if (sc->sf_cdata.sf_rx_cring_tag) {
1344 if (sc->sf_cdata.sf_rx_cring_map)
1345 bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag,
1346 sc->sf_cdata.sf_rx_cring_map);
1347 if (sc->sf_cdata.sf_rx_cring_map &&
1348 sc->sf_rdata.sf_rx_cring)
1349 bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag,
1350 sc->sf_rdata.sf_rx_cring,
1351 sc->sf_cdata.sf_rx_cring_map);
1352 sc->sf_rdata.sf_rx_cring = NULL;
1353 sc->sf_cdata.sf_rx_cring_map = NULL;
1354 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag);
1355 sc->sf_cdata.sf_rx_cring_tag = NULL;
1356 }
1357 /* Tx buffers. */
1358 if (sc->sf_cdata.sf_tx_tag) {
1359 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1360 txd = &sc->sf_cdata.sf_txdesc[i];
1361 if (txd->tx_dmamap) {
1362 bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag,
1363 txd->tx_dmamap);
1364 txd->tx_dmamap = NULL;
1365 }
1366 }
1367 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag);
1368 sc->sf_cdata.sf_tx_tag = NULL;
1369 }
1370 /* Rx buffers. */
1371 if (sc->sf_cdata.sf_rx_tag) {
1372 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1373 rxd = &sc->sf_cdata.sf_rxdesc[i];
1374 if (rxd->rx_dmamap) {
1375 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
1376 rxd->rx_dmamap);
1377 rxd->rx_dmamap = NULL;
1378 }
1379 }
1380 if (sc->sf_cdata.sf_rx_sparemap) {
1381 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
1382 sc->sf_cdata.sf_rx_sparemap);
1383			sc->sf_cdata.sf_rx_sparemap = NULL;
1384 }
1385 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag);
1386 sc->sf_cdata.sf_rx_tag = NULL;
1387 }
885
1388
886 bzero((char *)ld->sf_tx_dlist,
887 sizeof(struct sf_tx_bufdesc_type0) * SF_TX_DLIST_CNT);
888 bzero((char *)ld->sf_tx_clist,
889 sizeof(struct sf_tx_cmpdesc_type0) * SF_TX_CLIST_CNT);
1389 if (sc->sf_cdata.sf_parent_tag) {
1390 bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag);
1391 sc->sf_cdata.sf_parent_tag = NULL;
1392 }
1393}
890
1394
891 for (i = 0; i < SF_TX_DLIST_CNT; i++)
892 ld->sf_tx_dlist[i].sf_id = SF_TX_BUFDESC_ID;
893 for (i = 0; i < SF_TX_CLIST_CNT; i++)
894 ld->sf_tx_clist[i].sf_type = SF_TXCMPTYPE_TX;
1395static int
1396sf_init_rx_ring(struct sf_softc *sc)
1397{
1398 struct sf_ring_data *rd;
1399 int i;
895
1400
896 ld->sf_tx_dlist[SF_TX_DLIST_CNT - 1].sf_end = 1;
897 sc->sf_tx_cnt = 0;
1401 sc->sf_cdata.sf_rxc_cons = 0;
1402
1403 rd = &sc->sf_rdata;
1404 bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE);
1405 bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE);
1406
1407 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1408 if (sf_newbuf(sc, i) != 0)
1409 return (ENOBUFS);
1410 }
1411
1412 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1413 sc->sf_cdata.sf_rx_cring_map,
1414 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1415 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1416 sc->sf_cdata.sf_rx_ring_map,
1417 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1418
1419 return (0);
898}
899
1420}
1421
1422static void
1423sf_init_tx_ring(struct sf_softc *sc)
1424{
1425 struct sf_ring_data *rd;
1426 int i;
1427
1428 sc->sf_cdata.sf_tx_prod = 0;
1429 sc->sf_cdata.sf_tx_cnt = 0;
1430 sc->sf_cdata.sf_txc_cons = 0;
1431
1432 rd = &sc->sf_rdata;
1433 bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE);
1434 bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE);
1435 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1436 rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID);
1437 sc->sf_cdata.sf_txdesc[i].tx_m = NULL;
1438 sc->sf_cdata.sf_txdesc[i].ndesc = 0;
1439 }
1440	rd->sf_tx_ring[i - 1].sf_tx_ctrl |= htole32(SF_TX_DESC_END);	/* i == SF_TX_DLIST_CNT here */
1441
1442 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
1443 sc->sf_cdata.sf_tx_ring_map,
1444 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1445 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1446 sc->sf_cdata.sf_tx_cring_map,
1447 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1448}
1449
1450/*
1451 * Initialize an RX descriptor and attach an MBUF cluster.
1452 */
900static int
1453static int
901sf_newbuf(sc, c, m)
902 struct sf_softc *sc;
903 struct sf_rx_bufdesc_type0 *c;
904 struct mbuf *m;
1454sf_newbuf(struct sf_softc *sc, int idx)
905{
1455{
906 struct mbuf *m_new = NULL;
1456 struct sf_rx_rdesc *desc;
1457 struct sf_rxdesc *rxd;
1458 struct mbuf *m;
1459 bus_dma_segment_t segs[1];
1460 bus_dmamap_t map;
1461 int nsegs;
907
1462
908 if (m == NULL) {
909 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
910 if (m_new == NULL)
911 return(ENOBUFS);
1463 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1464 if (m == NULL)
1465 return (ENOBUFS);
1466 m->m_len = m->m_pkthdr.len = MCLBYTES;
1467 m_adj(m, sizeof(uint32_t));
912
1468
913 MCLGET(m_new, M_DONTWAIT);
914 if (!(m_new->m_flags & M_EXT)) {
915 m_freem(m_new);
916 return(ENOBUFS);
917 }
918 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
919 } else {
920 m_new = m;
921 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
922 m_new->m_data = m_new->m_ext.ext_buf;
1469 if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag,
1470 sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1471 m_freem(m);
1472 return (ENOBUFS);
923 }
1473 }
1474 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
924
1475
925 m_adj(m_new, sizeof(u_int64_t));
1476 rxd = &sc->sf_cdata.sf_rxdesc[idx];
1477 if (rxd->rx_m != NULL) {
1478 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
1479 BUS_DMASYNC_POSTREAD);
1480 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap);
1481 }
1482 map = rxd->rx_dmamap;
1483 rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap;
1484 sc->sf_cdata.sf_rx_sparemap = map;
1485 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
1486 BUS_DMASYNC_PREREAD);
1487 rxd->rx_m = m;
1488 desc = &sc->sf_rdata.sf_rx_ring[idx];
1489 desc->sf_addr = htole64(segs[0].ds_addr);
926
1490
927 c->sf_mbuf = m_new;
928 c->sf_addrlo = SF_RX_HOSTADDR(vtophys(mtod(m_new, caddr_t)));
929 c->sf_valid = 1;
1491 return (0);
1492}
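/*
 * sf_newbuf() above relies on a preallocated spare DMA map: the new
 * cluster is loaded into the spare first, and only on success are the
 * spare and the slot's map swapped, so a failed refill leaves the old
 * buffer and its mapping untouched. A minimal, driver-agnostic sketch of
 * the same pattern (the types and the load callback are stand-ins, not
 * driver API):
 */
struct slot {
	void	*map;	/* stands in for bus_dmamap_t */
	void	*buf;	/* stands in for the mbuf */
};

static int
refill(struct slot *s, void **sparemap, void *newbuf,
    int (*load)(void *map, void *buf))
{
	void *tmp;

	if ((*load)(*sparemap, newbuf) != 0)
		return (-1);	/* old buffer in s->map stays intact */
	tmp = s->map;		/* the slot takes the spare map... */
	s->map = *sparemap;
	*sparemap = tmp;	/* ...and its old map becomes the spare */
	s->buf = newbuf;
	return (0);
}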
930
1493
931 return(0);
1494#ifndef __NO_STRICT_ALIGNMENT
1495static __inline void
1496sf_fixup_rx(struct mbuf *m)
1497{
1498 int i;
1499 uint16_t *src, *dst;
1500
1501 src = mtod(m, uint16_t *);
1502 dst = src - 1;
1503
1504 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1505 *dst++ = *src++;
1506
1507 m->m_data -= ETHER_ALIGN;
932}
1508}
1509#endif
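/*
 * sf_fixup_rx() above slides the whole frame back by ETHER_ALIGN (2)
 * bytes with 16-bit copies so the IP header becomes 32-bit aligned on
 * strict-alignment machines. A standalone demonstration of the same
 * shift on a toy buffer, assuming ETHER_ALIGN is 2:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETHER_ALIGN	2	/* assumed, as in <net/ethernet.h> */

int
main(void)
{
	uint16_t buf[8];
	uint16_t *src, *dst;
	int i, len = 6;		/* payload length in bytes */

	memset(buf, 0, sizeof(buf));
	/* Payload starts 2 bytes in, as the chip leaves it. */
	memcpy((uint8_t *)buf + ETHER_ALIGN, "hello", len);
	src = &buf[1];
	dst = &buf[0];
	/* Copy len bytes, rounded up to 16-bit words, down by 2 bytes. */
	for (i = 0; i < len / 2 + 1; i++)
		*dst++ = *src++;
	printf("%s\n", (char *)buf);	/* payload now starts at offset 0 */
	return (0);
}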
933
934/*
935 * The starfire is programmed to use 'normal' mode for packet reception,
936 * which means we use the consumer/producer model for both the buffer
937 * descriptor queue and the completion descriptor queue. The only problem
938 * with this is that it involves a lot of register accesses: we have to
939 * read the RX completion consumer and producer indexes and the RX buffer
940 * producer index, plus the RX completion consumer and RX buffer producer
941 * indexes have to be updated. It would have been easier if Adaptec had
942 * put each index in a separate register, especially given that the damn
943 * NIC has a 512K register space.
944 *
945 * In spite of all the lovely features that Adaptec crammed into the 6915,
946 * it is marred by one truly stupid design flaw, which is that receive
947 * buffer addresses must be aligned on a longword boundary. This forces
948 * the packet payload to be unaligned, which is suboptimal on the x86 and
949	 * completely unusable on the Alpha. Our only recourse is to copy received
950 * packets into properly aligned buffers before handing them off.
951 */
1510
1511/*
1512 * The starfire is programmed to use 'normal' mode for packet reception,
1513 * which means we use the consumer/producer model for both the buffer
1514 * descriptor queue and the completion descriptor queue. The only problem
1515 * with this is that it involves a lot of register accesses: we have to
1516 * read the RX completion consumer and producer indexes and the RX buffer
1517 * producer index, plus the RX completion consumer and RX buffer producer
1518 * indexes have to be updated. It would have been easier if Adaptec had
1519 * put each index in a separate register, especially given that the damn
1520 * NIC has a 512K register space.
1521 *
1522 * In spite of all the lovely features that Adaptec crammed into the 6915,
1523 * it is marred by one truly stupid design flaw, which is that receive
1524 * buffer addresses must be aligned on a longword boundary. This forces
1525 * the packet payload to be unaligned, which is suboptimal on the x86 and
1526	 * completely unusable on the Alpha. Our only recourse is to copy received
1527 * packets into properly aligned buffers before handing them off.
1528 */
952
953static void
1529static void
954sf_rxeof(sc)
955 struct sf_softc *sc;
1530sf_rxeof(struct sf_softc *sc)
956{
957 struct mbuf *m;
958 struct ifnet *ifp;
1531{
1532 struct mbuf *m;
1533 struct ifnet *ifp;
959 struct sf_rx_bufdesc_type0 *desc;
960 struct sf_rx_cmpdesc_type3 *cur_rx;
961 u_int32_t rxcons, rxprod;
962 int cmpprodidx, cmpconsidx, bufprodidx;
1534 struct sf_rxdesc *rxd;
1535 struct sf_rx_rcdesc *cur_cmp;
1536 int cons, eidx, prog;
1537 uint32_t status, status2;
963
964 SF_LOCK_ASSERT(sc);
965
966 ifp = sc->sf_ifp;
967
1538
1539 SF_LOCK_ASSERT(sc);
1540
1541 ifp = sc->sf_ifp;
1542
968 rxcons = csr_read_4(sc, SF_CQ_CONSIDX);
969 rxprod = csr_read_4(sc, SF_RXDQ_PTR_Q1);
970 cmpprodidx = SF_IDX_LO(csr_read_4(sc, SF_CQ_PRODIDX));
971 cmpconsidx = SF_IDX_LO(rxcons);
972 bufprodidx = SF_IDX_LO(rxprod);
1543 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1544 sc->sf_cdata.sf_rx_ring_map,
1545 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1546 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1547 sc->sf_cdata.sf_rx_cring_map,
1548 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
973
1549
974 while (cmpconsidx != cmpprodidx) {
975 struct mbuf *m0;
976
1550 /*
1551	 * To reduce register accesses, read the Rx completion queue
1552	 * entries directly rather than polling the producer index register.
1553 */
1554 eidx = 0;
1555 prog = 0;
1556 for (cons = sc->sf_cdata.sf_rxc_cons; ; SF_INC(cons, SF_RX_CLIST_CNT)) {
1557 cur_cmp = &sc->sf_rdata.sf_rx_cring[cons];
1558 status = le32toh(cur_cmp->sf_rx_status1);
1559 if (status == 0)
1560 break;
977#ifdef DEVICE_POLLING
1561#ifdef DEVICE_POLLING
978 if (ifp->if_capenable & IFCAP_POLLING) {
1562 if ((ifp->if_capenable & IFCAP_POLLING) != 0) {
979 if (sc->rxcycles <= 0)
980 break;
981 sc->rxcycles--;
982 }
983#endif
1563 if (sc->rxcycles <= 0)
1564 break;
1565 sc->rxcycles--;
1566 }
1567#endif
1568 prog++;
1569 eidx = (status & SF_RX_CMPDESC_EIDX) >> 16;
1570 rxd = &sc->sf_cdata.sf_rxdesc[eidx];
1571 m = rxd->rx_m;
984
1572
985 cur_rx = &sc->sf_ldata->sf_rx_clist[cmpconsidx];
986 desc = &sc->sf_ldata->sf_rx_dlist_big[cur_rx->sf_endidx];
987 m = desc->sf_mbuf;
988 SF_INC(cmpconsidx, SF_RX_CLIST_CNT);
989 SF_INC(bufprodidx, SF_RX_DLIST_CNT);
990
991 if (!(cur_rx->sf_status1 & SF_RXSTAT1_OK)) {
992 ifp->if_ierrors++;
993 sf_newbuf(sc, desc, m);
1573 /*
1574 * Note, if_ipackets and if_ierrors counters
1575 * are handled in sf_stats_update().
1576 */
1577 if ((status & SF_RXSTAT1_OK) == 0) {
1578 cur_cmp->sf_rx_status1 = 0;
994 continue;
995 }
996
1579 continue;
1580 }
1581
997 m0 = m_devget(mtod(m, char *), cur_rx->sf_len, ETHER_ALIGN,
998 ifp, NULL);
999 sf_newbuf(sc, desc, m);
1000 if (m0 == NULL) {
1001 ifp->if_ierrors++;
1582 if (sf_newbuf(sc, eidx) != 0) {
1583 ifp->if_iqdrops++;
1584 cur_cmp->sf_rx_status1 = 0;
1002 continue;
1003 }
1585 continue;
1586 }
1004 m = m0;
1005
1587
1006 ifp->if_ipackets++;
1588 /* AIC-6915 supports TCP/UDP checksum offload. */
1589 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1590 status2 = le32toh(cur_cmp->sf_rx_status2);
1591 /*
1592			 * Sometimes the AIC-6915 generates an interrupt to
1593			 * warn of an RxGFP stall, with the bad checksum bit
1594			 * set in the status word. It is not clear what
1595			 * condition triggers it, but received packets'
1596			 * checksums were correct even though the AIC-6915
1597			 * disagreed. This may be an indication of a firmware
1598			 * bug. To work around the issue, do not rely on the
1599			 * bad checksum bit in the status word; let the upper
1600			 * layer verify the integrity of the received
1601			 * frame.
1602			 * Another nice feature of the AIC-6915 is hardware
1603			 * assistance with checksum calculation: it provides
1604			 * a partial checksum value for each received
1605			 * frame. The partial checksum value can be used
1606			 * to accelerate checksum computation for
1607			 * fragmented TCP/UDP packets; the upper network
1608			 * stack already takes advantage of the partial
1609			 * checksum value in the IP reassembly stage. But
1610			 * the correctness of the partial hardware checksum
1611			 * assistance is questionable, as frequent RxGFP
1612			 * stalls are seen even on non-fragmented frames.
1613			 * Given the complexity of the checksum computation
1614			 * code in the firmware, another RxGFP bug is
1615			 * plausible, so ignore checksum assistance for
1616			 * fragmented frames for now. This can be changed
1617			 * in the future.
1618 */
1619 if ((status2 & SF_RXSTAT2_FRAG) == 0) {
1620 if ((status2 & (SF_RXSTAT2_TCP |
1621 SF_RXSTAT2_UDP)) != 0) {
1622 if ((status2 & SF_RXSTAT2_CSUM_OK)) {
1623 m->m_pkthdr.csum_flags =
1624 CSUM_DATA_VALID |
1625 CSUM_PSEUDO_HDR;
1626 m->m_pkthdr.csum_data = 0xffff;
1627 }
1628 }
1629 }
1630#ifdef SF_PARTIAL_CSUM_SUPPORT
1631 else if ((status2 & SF_RXSTAT2_FRAG) != 0) {
1632 if ((status2 & (SF_RXSTAT2_TCP |
1633 SF_RXSTAT2_UDP)) != 0) {
1634 if ((status2 & SF_RXSTAT2_PCSUM_OK)) {
1635 m->m_pkthdr.csum_flags =
1636 CSUM_DATA_VALID;
1637 m->m_pkthdr.csum_data =
1638 (status &
1639 SF_RX_CMPDESC_CSUM2);
1640 }
1641 }
1642 }
1643#endif
1644 }
1645
1646 m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN;
1647#ifndef __NO_STRICT_ALIGNMENT
1648 sf_fixup_rx(m);
1649#endif
1650 m->m_pkthdr.rcvif = ifp;
1651
1007 SF_UNLOCK(sc);
1008 (*ifp->if_input)(ifp, m);
1009 SF_LOCK(sc);
1652 SF_UNLOCK(sc);
1653 (*ifp->if_input)(ifp, m);
1654 SF_LOCK(sc);
1655
1656 /* Clear completion status. */
1657 cur_cmp->sf_rx_status1 = 0;
1010 }
1011
1658 }
1659
1012 csr_write_4(sc, SF_CQ_CONSIDX,
1013 (rxcons & ~SF_CQ_CONSIDX_RXQ1) | cmpconsidx);
1014 csr_write_4(sc, SF_RXDQ_PTR_Q1,
1015 (rxprod & ~SF_RXDQ_PRODIDX) | bufprodidx);
1660 if (prog > 0) {
1661 sc->sf_cdata.sf_rxc_cons = cons;
1662 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1663 sc->sf_cdata.sf_rx_ring_map,
1664 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1665 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1666 sc->sf_cdata.sf_rx_cring_map,
1667 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1668
1669 /* Update Rx completion Q1 consumer index. */
1670 csr_write_4(sc, SF_CQ_CONSIDX,
1671 (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) |
1672 (cons & SF_CQ_CONSIDX_RXQ1));
1673 /* Update Rx descriptor Q1 ptr. */
1674 csr_write_4(sc, SF_RXDQ_PTR_Q1,
1675 (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) |
1676 (eidx & SF_RXDQ_PRODIDX));
1677 }
1016}
1017
1018/*
1019 * Read the transmit status from the completion queue and release
1020 * mbufs. Note that the buffer descriptor index in the completion
1021 * descriptor is an offset from the start of the transmit buffer
1022 * descriptor list in bytes. This is important because the manual
1023 * gives the impression that it should match the producer/consumer
1024 * index, which is the offset in 8 byte blocks.
1025 */
1026static void
1678}
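/*
 * Each Rx completion entry above packs both the frame length and the
 * index of the frame's last buffer into one 32-bit status word, and the
 * loop stops at the first zeroed entry (entries are zeroed again after
 * use so the next pass terminates correctly). A standalone sketch of the
 * field extraction; the mask values here are assumptions consistent with
 * the shifts above, the authoritative definitions being in the driver's
 * register header.
 */
#include <stdint.h>
#include <stdio.h>

#define SF_RX_CMPDESC_LEN	0x0000ffff	/* assumed mask */
#define SF_RX_CMPDESC_EIDX	0x07ff0000	/* assumed mask */

int
main(void)
{
	uint32_t status = 0x002305ea;	/* hypothetical completion word */

	printf("end index %u, length %u bytes\n",
	    (status & SF_RX_CMPDESC_EIDX) >> 16,
	    status & SF_RX_CMPDESC_LEN);
	return (0);
}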
1679
1680/*
1681 * Read the transmit status from the completion queue and release
1682 * mbufs. Note that the buffer descriptor index in the completion
1683 * descriptor is an offset from the start of the transmit buffer
1684 * descriptor list in bytes. This is important because the manual
1685 * gives the impression that it should match the producer/consumer
1686 * index, which is the offset in 8 byte blocks.
1687 */
1688static void
1027sf_txeof(sc)
1028 struct sf_softc *sc;
1689sf_txeof(struct sf_softc *sc)
1029{
1690{
1030 int txcons, cmpprodidx, cmpconsidx;
1031 struct sf_tx_cmpdesc_type1 *cur_cmp;
1032 struct sf_tx_bufdesc_type0 *cur_tx;
1691 struct sf_txdesc *txd;
1692 struct sf_tx_rcdesc *cur_cmp;
1033 struct ifnet *ifp;
1693 struct ifnet *ifp;
1694 uint32_t status;
1695 int cons, idx, prod;
1034
1696
1035 ifp = sc->sf_ifp;
1036
1037 SF_LOCK_ASSERT(sc);
1697 SF_LOCK_ASSERT(sc);
1038 txcons = csr_read_4(sc, SF_CQ_CONSIDX);
1039 cmpprodidx = SF_IDX_HI(csr_read_4(sc, SF_CQ_PRODIDX));
1040 cmpconsidx = SF_IDX_HI(txcons);
1041
1698
1042 while (cmpconsidx != cmpprodidx) {
1043 cur_cmp = &sc->sf_ldata->sf_tx_clist[cmpconsidx];
1044 cur_tx = &sc->sf_ldata->sf_tx_dlist[cur_cmp->sf_index >> 7];
1699 ifp = sc->sf_ifp;
1045
1700
1046 if (cur_cmp->sf_txstat & SF_TXSTAT_TX_OK)
1047 ifp->if_opackets++;
1048 else {
1049 if (cur_cmp->sf_txstat & SF_TXSTAT_TX_UNDERRUN)
1050 sf_txthresh_adjust(sc);
1051 ifp->if_oerrors++;
1052 }
1701 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1702 sc->sf_cdata.sf_tx_cring_map,
1703 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1053
1704
1054 sc->sf_tx_cnt--;
1055 if (cur_tx->sf_mbuf != NULL) {
1056 m_freem(cur_tx->sf_mbuf);
1057 cur_tx->sf_mbuf = NULL;
1058 } else
1705 cons = sc->sf_cdata.sf_txc_cons;
1706 prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16;
1707 if (prod == cons)
1708 return;
1709
1710 for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) {
1711 cur_cmp = &sc->sf_rdata.sf_tx_cring[cons];
1712 status = le32toh(cur_cmp->sf_tx_status1);
1713 if (status == 0)
1059 break;
1714 break;
1060 SF_INC(cmpconsidx, SF_TX_CLIST_CNT);
1715 switch (status & SF_TX_CMPDESC_TYPE) {
1716 case SF_TXCMPTYPE_TX:
1717 /* Tx complete entry. */
1718 break;
1719 case SF_TXCMPTYPE_DMA:
1720 /* DMA complete entry. */
1721 idx = status & SF_TX_CMPDESC_IDX;
1722 idx = idx / sizeof(struct sf_tx_rdesc);
1723 /*
1724 * We don't need to check Tx status here.
1725 * SF_ISR_TX_LOFIFO intr would handle this.
1726 * Note, if_opackets, if_collisions and if_oerrors
1727 * counters are handled in sf_stats_update().
1728 */
1729 txd = &sc->sf_cdata.sf_txdesc[idx];
1730 if (txd->tx_m != NULL) {
1731 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
1732 txd->tx_dmamap,
1733 BUS_DMASYNC_POSTWRITE);
1734 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
1735 txd->tx_dmamap);
1736 m_freem(txd->tx_m);
1737 txd->tx_m = NULL;
1738 }
1739 sc->sf_cdata.sf_tx_cnt -= txd->ndesc;
1740 KASSERT(sc->sf_cdata.sf_tx_cnt >= 0,
1741 ("%s: Active Tx desc counter was garbled\n",
1742 __func__));
1743 txd->ndesc = 0;
1744 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1745 break;
1746 default:
1747			/* This should not happen. */
1748 device_printf(sc->sf_dev,
1749			    "unknown Tx completion type: 0x%08x, cons %d, prod %d\n",
1750 status, cons, prod);
1751 break;
1752 }
1753 cur_cmp->sf_tx_status1 = 0;
1061 }
1062
1754 }
1755
1063 ifp->if_timer = 0;
1064 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1756 sc->sf_cdata.sf_txc_cons = cons;
1757 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1758 sc->sf_cdata.sf_tx_cring_map,
1759 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1065
1760
1761 if (sc->sf_cdata.sf_tx_cnt == 0)
1762 sc->sf_watchdog_timer = 0;
1763
1764 /* Update Tx completion consumer index. */
1066 csr_write_4(sc, SF_CQ_CONSIDX,
1765 csr_write_4(sc, SF_CQ_CONSIDX,
1067 (txcons & ~SF_CQ_CONSIDX_TXQ) |
1068 ((cmpconsidx << 16) & 0xFFFF0000));
1766 (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) |
1767 ((cons << 16) & 0xffff0000));
1069}
1070
1071static void
1768}
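/*
 * As the comment before sf_txeof() explains, the index field in a
 * DMA-complete entry is a byte offset into the Tx descriptor ring, while
 * the producer/consumer registers count 8-byte blocks. With the 16-byte
 * type-2 descriptor layout used by sf_encap() below, the two conversions
 * look like this (standalone sketch):
 */
#include <stdint.h>
#include <stdio.h>

struct sf_tx_rdesc {			/* layout as used in sf_encap() */
	uint32_t	sf_tx_ctrl;
	uint32_t	sf_tx_reserved;
	uint64_t	sf_addr;
};

int
main(void)
{
	uint32_t byte_off = 5 * sizeof(struct sf_tx_rdesc);

	/* The completion entry reports bytes; the ring is indexed by slot. */
	printf("descriptor index %zu\n",
	    byte_off / sizeof(struct sf_tx_rdesc));
	/* The producer index register counts 8-byte blocks instead. */
	printf("producer units for slot 5: %zu\n",
	    5 * (sizeof(struct sf_tx_rdesc) / 8));
	return (0);
}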
1769
1770static void
1072sf_txthresh_adjust(sc)
1073 struct sf_softc *sc;
1771sf_txthresh_adjust(struct sf_softc *sc)
1074{
1772{
1075 u_int32_t txfctl;
1076 u_int8_t txthresh;
1773 uint32_t txfctl;
1077
1774
1078 txfctl = csr_read_4(sc, SF_TX_FRAMCTL);
1079 txthresh = txfctl & SF_TXFRMCTL_TXTHRESH;
1080 if (txthresh < 0xFF) {
1081 txthresh++;
1775 device_printf(sc->sf_dev, "Tx underrun -- ");
1776 if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) {
1777 txfctl = csr_read_4(sc, SF_TX_FRAMCTL);
1778 /* Increase Tx threshold 256 bytes. */
1779 sc->sf_txthresh += 16;
1780 if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD)
1781 sc->sf_txthresh = SF_MAX_TX_THRESHOLD;
1082 txfctl &= ~SF_TXFRMCTL_TXTHRESH;
1782 txfctl &= ~SF_TXFRMCTL_TXTHRESH;
1083 txfctl |= txthresh;
1084#ifdef DIAGNOSTIC
1085 device_printf(sc->sf_dev, "tx underrun, increasing "
1086 "tx threshold to %d bytes\n",
1087 txthresh * 4);
1088#endif
1783 txfctl |= sc->sf_txthresh;
1784 printf("increasing Tx threshold to %d bytes\n",
1785 sc->sf_txthresh * SF_TX_THRESHOLD_UNIT);
1089 csr_write_4(sc, SF_TX_FRAMCTL, txfctl);
1786 csr_write_4(sc, SF_TX_FRAMCTL, txfctl);
1090 }
1787 } else
1788 printf("\n");
1091}
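/*
 * sf_txthresh_adjust() above raises sc->sf_txthresh by 16 units on each
 * Tx underrun. Assuming SF_TX_THRESHOLD_UNIT is 16 bytes (an assumption
 * consistent with the "256 bytes" comment above; the real value is in
 * the driver's header), each bump delays transmit start until another
 * 256 bytes of the frame have been fetched. A toy version:
 */
#include <stdio.h>

#define SF_TX_THRESHOLD_UNIT	16	/* assumed bytes per unit */
#define SF_MAX_TX_THRESHOLD	0x1ff	/* hypothetical field limit */

int
main(void)
{
	int txthresh = 4;		/* hypothetical starting value */
	int underruns;

	for (underruns = 0; underruns < 3; underruns++) {
		txthresh += 16;		/* +16 units == +256 bytes */
		if (txthresh > SF_MAX_TX_THRESHOLD)
			txthresh = SF_MAX_TX_THRESHOLD;
		printf("threshold now %d bytes\n",
		    txthresh * SF_TX_THRESHOLD_UNIT);
	}
	return (0);
}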
1092
1093#ifdef DEVICE_POLLING
1094static void
1095sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1096{
1789}
1790
1791#ifdef DEVICE_POLLING
1792static void
1793sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1794{
1097 struct sf_softc *sc = ifp->if_softc;
1795 struct sf_softc *sc;
1796 uint32_t status;
1098
1797
1798 sc = ifp->if_softc;
1099 SF_LOCK(sc);
1799 SF_LOCK(sc);
1100 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1101 sf_poll_locked(ifp, cmd, count);
1102 SF_UNLOCK(sc);
1103}
1104
1800
1105static void
1106sf_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1107{
1108 struct sf_softc *sc = ifp->if_softc;
1801 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1802 SF_UNLOCK(sc);
1803 return;
1804 }
1109
1805
1110 SF_LOCK_ASSERT(sc);
1111
1112 sc->rxcycles = count;
1113 sf_rxeof(sc);
1114 sf_txeof(sc);
1115 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1116 sf_start_locked(ifp);
1117
1118 if (cmd == POLL_AND_CHECK_STATUS) {
1806 sc->rxcycles = count;
1807 sf_rxeof(sc);
1808 sf_txeof(sc);
1809 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1810 sf_start_locked(ifp);
1811
1812 if (cmd == POLL_AND_CHECK_STATUS) {
1119 u_int32_t status;
1120
1813			/* Reading the ISR register clears all interrupts. */
1121 status = csr_read_4(sc, SF_ISR);
1814 status = csr_read_4(sc, SF_ISR);
1122 if (status)
1123 csr_write_4(sc, SF_ISR, status);
1124
1815
1125 if (status & SF_ISR_TX_LOFIFO)
1126 sf_txthresh_adjust(sc);
1127
1128 if (status & SF_ISR_ABNORMALINTR) {
1129 if (status & SF_ISR_STATSOFLOW) {
1130 callout_stop(&sc->sf_stat_callout);
1816 if ((status & SF_ISR_ABNORMALINTR) != 0) {
1817 if ((status & SF_ISR_STATSOFLOW) != 0)
1131 sf_stats_update(sc);
1818 sf_stats_update(sc);
1132 } else
1819 else if ((status & SF_ISR_TX_LOFIFO) != 0)
1820 sf_txthresh_adjust(sc);
1821 else if ((status & SF_ISR_DMAERR) != 0) {
1822 device_printf(sc->sf_dev,
1823 "DMA error, resetting\n");
1133 sf_init_locked(sc);
1824 sf_init_locked(sc);
1825 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1826				sc->sf_statistics.sf_tx_gfp_stall++;
1827#ifdef SF_GFP_DEBUG
1828 device_printf(sc->sf_dev,
1829 "TxGFP is not responding!\n");
1830#endif
1831 } else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1832				sc->sf_statistics.sf_rx_gfp_stall++;
1833#ifdef SF_GFP_DEBUG
1834 device_printf(sc->sf_dev,
1835 "RxGFP is not responding!\n");
1836#endif
1837 }
1134 }
1135 }
1838 }
1839 }
1840
1841 SF_UNLOCK(sc);
1136}
1137#endif /* DEVICE_POLLING */
1138
1139static void
1842}
1843#endif /* DEVICE_POLLING */
1844
1845static void
1140sf_intr(arg)
1141 void *arg;
1846sf_intr(void *arg)
1142{
1143 struct sf_softc *sc;
1144 struct ifnet *ifp;
1847{
1848 struct sf_softc *sc;
1849 struct ifnet *ifp;
1145 u_int32_t status;
1850 uint32_t status;
1146
1851
1147 sc = arg;
1852 sc = (struct sf_softc *)arg;
1148 SF_LOCK(sc);
1149
1853 SF_LOCK(sc);
1854
1150 ifp = sc->sf_ifp;
1855 if (sc->sf_suspended != 0)
1856 goto done_locked;
1151
1857
1858	/* Reading the ISR register clears all interrupts. */
1859 status = csr_read_4(sc, SF_ISR);
1860 if (status == 0 || status == 0xffffffff ||
1861 (status & SF_ISR_PCIINT_ASSERTED) == 0)
1862 goto done_locked;
1863
1864 ifp = sc->sf_ifp;
1152#ifdef DEVICE_POLLING
1865#ifdef DEVICE_POLLING
1153 if (ifp->if_capenable & IFCAP_POLLING) {
1154 SF_UNLOCK(sc);
1155 return;
1156 }
1866 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1867 goto done_locked;
1157#endif
1868#endif
1869 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1870 goto done_locked;
1158
1871
1159 if (!(csr_read_4(sc, SF_ISR_SHADOW) & SF_ISR_PCIINT_ASSERTED)) {
1160 SF_UNLOCK(sc);
1161 return;
1162 }
1163
1164 /* Disable interrupts. */
1165 csr_write_4(sc, SF_IMR, 0x00000000);
1166
1872 /* Disable interrupts. */
1873 csr_write_4(sc, SF_IMR, 0x00000000);
1874
1167 for (;;) {
1168 status = csr_read_4(sc, SF_ISR);
1169 if (status)
1170 csr_write_4(sc, SF_ISR, status);
1171
1172 if (!(status & SF_INTRS))
1173 break;
1174
1175 if (status & SF_ISR_RXDQ1_DMADONE)
1875 for (; (status & SF_INTRS) != 0;) {
1876 if ((status & SF_ISR_RXDQ1_DMADONE) != 0)
1176 sf_rxeof(sc);
1177
1877 sf_rxeof(sc);
1878
1178 if (status & SF_ISR_TX_TXDONE ||
1179 status & SF_ISR_TX_DMADONE ||
1180 status & SF_ISR_TX_QUEUEDONE)
1879 if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE |
1880 SF_ISR_TX_QUEUEDONE)) != 0)
1181 sf_txeof(sc);
1182
1881 sf_txeof(sc);
1882
1183 if (status & SF_ISR_TX_LOFIFO)
1184 sf_txthresh_adjust(sc);
1185
1186 if (status & SF_ISR_ABNORMALINTR) {
1187 if (status & SF_ISR_STATSOFLOW) {
1188 callout_stop(&sc->sf_stat_callout);
1883 if ((status & SF_ISR_ABNORMALINTR) != 0) {
1884 if ((status & SF_ISR_STATSOFLOW) != 0)
1189 sf_stats_update(sc);
1885 sf_stats_update(sc);
1190 } else
1886 else if ((status & SF_ISR_TX_LOFIFO) != 0)
1887 sf_txthresh_adjust(sc);
1888 else if ((status & SF_ISR_DMAERR) != 0) {
1889 device_printf(sc->sf_dev,
1890 "DMA error, resetting\n");
1191 sf_init_locked(sc);
1891 sf_init_locked(sc);
1892 break;
1893 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1894 sc->sf_statistics.sf_tx_gfp_stall++;
1895#ifdef SF_GFP_DEBUG
1896 device_printf(sc->sf_dev,
1897 "TxGFP is not responding!\n");
1898#endif
1899			} else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1901 sc->sf_statistics.sf_rx_gfp_stall++;
1902#ifdef SF_GFP_DEBUG
1903 device_printf(sc->sf_dev,
1904 "RxGFP is not responding!\n");
1905#endif
1906 }
1192 }
1907 }
1908		/* Reading the ISR register clears all interrupts. */
1909 status = csr_read_4(sc, SF_ISR);
1193 }
1194
1195 /* Re-enable interrupts. */
1196 csr_write_4(sc, SF_IMR, SF_INTRS);
1197
1198 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1199 sf_start_locked(ifp);
1910 }
1911
1912 /* Re-enable interrupts. */
1913 csr_write_4(sc, SF_IMR, SF_INTRS);
1914
1915 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1916 sf_start_locked(ifp);
1200
1917done_locked:
1201 SF_UNLOCK(sc);
1202}
1203
1204static void
1918 SF_UNLOCK(sc);
1919}
1920
1921static void
1205sf_init(xsc)
1206 void *xsc;
1922sf_download_fw(struct sf_softc *sc)
1207{
1923{
1924 uint32_t gfpinst;
1925 int i, ndx;
1926 uint8_t *p;
1927
1928 /*
1929	 * A GFP instruction is composed of 48 bits, so we have to
1930	 * write it in two parts.
1931 */
1932 p = txfwdata;
1933 ndx = 0;
1934 for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) {
1935 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1936 csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst);
1937 gfpinst = p[0] << 8 | p[1];
1938 csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1939 p += SF_GFP_INST_BYTES;
1940 ndx += 2;
1941 }
1942 if (bootverbose)
1943 device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i);
1944
1945 p = rxfwdata;
1946 ndx = 0;
1947 for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) {
1948 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1949 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst);
1950 gfpinst = p[0] << 8 | p[1];
1951 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1952 p += SF_GFP_INST_BYTES;
1953 ndx += 2;
1954 }
1955 if (bootverbose)
1956 device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i);
1957}
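/*
 * sf_download_fw() above writes each 6-byte (48-bit) GFP instruction as
 * two 32-bit register stores: bytes 2-5 form the low word and bytes 0-1
 * the high half-word, so every instruction consumes two 32-bit slots in
 * GFP memory. A standalone sketch of the packing:
 */
#include <stdint.h>
#include <stdio.h>

#define SF_GFP_INST_BYTES	6

int
main(void)
{
	/* One hypothetical instruction as its six raw bytes. */
	uint8_t p[SF_GFP_INST_BYTES] = { 0xaa, 0xbb, 0x11, 0x22, 0x33, 0x44 };
	uint32_t lo, hi;

	lo = (uint32_t)p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
	hi = (uint32_t)p[0] << 8 | p[1];
	printf("slot n:   0x%08x\n", lo);	/* low 32 bits */
	printf("slot n+1: 0x%08x\n", hi);	/* high 16 bits */
	return (0);
}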
1958
1959static void
1960sf_init(void *xsc)
1961{
1208 struct sf_softc *sc;
1209
1962 struct sf_softc *sc;
1963
1210 sc = xsc;
1964 sc = (struct sf_softc *)xsc;
1211 SF_LOCK(sc);
1212 sf_init_locked(sc);
1213 SF_UNLOCK(sc);
1214}
1215
1216static void
1965 SF_LOCK(sc);
1966 sf_init_locked(sc);
1967 SF_UNLOCK(sc);
1968}
1969
1970static void
1217sf_init_locked(sc)
1218 struct sf_softc *sc;
1971sf_init_locked(struct sf_softc *sc)
1219{
1220 struct ifnet *ifp;
1221 struct mii_data *mii;
1972{
1973 struct ifnet *ifp;
1974 struct mii_data *mii;
1975 uint8_t eaddr[ETHER_ADDR_LEN];
1976 bus_addr_t addr;
1222 int i;
1223
1224 SF_LOCK_ASSERT(sc);
1225 ifp = sc->sf_ifp;
1226 mii = device_get_softc(sc->sf_miibus);
1227
1228 sf_stop(sc);
1977 int i;
1978
1979 SF_LOCK_ASSERT(sc);
1980 ifp = sc->sf_ifp;
1981 mii = device_get_softc(sc->sf_miibus);
1982
1983 sf_stop(sc);
1984 /* Reset the hardware to a known state. */
1229 sf_reset(sc);
1230
1231 /* Init all the receive filter registers */
1232 for (i = SF_RXFILT_PERFECT_BASE;
1985 sf_reset(sc);
1986
1987 /* Init all the receive filter registers */
1988 for (i = SF_RXFILT_PERFECT_BASE;
1233 i < (SF_RXFILT_HASH_MAX + 1); i += 4)
1989 i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t))
1234 csr_write_4(sc, i, 0);
1235
1236 /* Empty stats counter registers. */
1990 csr_write_4(sc, i, 0);
1991
1992 /* Empty stats counter registers. */
1237 for (i = 0; i < sizeof(struct sf_stats)/sizeof(u_int32_t); i++)
1238 csr_write_4(sc, SF_STATS_BASE +
1239 (i + sizeof(u_int32_t)), 0);
1993 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
1994 csr_write_4(sc, i, 0);
1240
1995
1241 /* Init our MAC address */
1242 csr_write_4(sc, SF_PAR0, *(u_int32_t *)(&IF_LLADDR(sc->sf_ifp)[0]));
1243 csr_write_4(sc, SF_PAR1, *(u_int32_t *)(&IF_LLADDR(sc->sf_ifp)[4]));
1244 sf_setperf(sc, 0, IF_LLADDR(sc->sf_ifp));
1996 /* Init our MAC address. */
1997 bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr));
1998 csr_write_4(sc, SF_PAR0,
1999 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2000 csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]);
2001 sf_setperf(sc, 0, eaddr);
1245
1246 if (sf_init_rx_ring(sc) == ENOBUFS) {
1247 device_printf(sc->sf_dev,
1248 "initialization failed: no memory for rx buffers\n");
1249 return;
1250 }
1251
1252 sf_init_tx_ring(sc);
1253
2002
2003 if (sf_init_rx_ring(sc) == ENOBUFS) {
2004 device_printf(sc->sf_dev,
2005 "initialization failed: no memory for rx buffers\n");
2006 return;
2007 }
2008
2009 sf_init_tx_ring(sc);
2010
1254 csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL|SF_HASHMODE_WITHVLAN);
2011 /*
2012	 * Use 16-entry perfect address filtering and hash only on the
2013	 * multicast destination address; accept matching frames
2014	 * regardless of VLAN ID.
2015 */
2016 csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN);
1255
2017
1256 /* If we want promiscuous mode, set the allframes bit. */
1257 if (ifp->if_flags & IFF_PROMISC) {
1258 SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC);
1259 } else {
1260 SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC);
1261 }
1262
1263 if (ifp->if_flags & IFF_BROADCAST) {
1264 SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_BROAD);
1265 } else {
1266 SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_BROAD);
1267 }
1268
1269 /*
2018 /*
1270 * Load the multicast filter.
2019 * Set Rx filter.
1271 */
2020 */
1272 sf_setmulti(sc);
2021 sf_rxfilter(sc);
1273
2022
1274 /* Init the completion queue indexes */
2023 /* Init the completion queue indexes. */
1275 csr_write_4(sc, SF_CQ_CONSIDX, 0);
1276 csr_write_4(sc, SF_CQ_PRODIDX, 0);
1277
2024 csr_write_4(sc, SF_CQ_CONSIDX, 0);
2025 csr_write_4(sc, SF_CQ_PRODIDX, 0);
2026
1278 /* Init the RX completion queue */
1279 csr_write_4(sc, SF_RXCQ_CTL_1,
1280 vtophys(sc->sf_ldata->sf_rx_clist) & SF_RXCQ_ADDR);
1281 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_3);
2027 /* Init the RX completion queue. */
2028 addr = sc->sf_rdata.sf_rx_cring_paddr;
2029 csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr));
2030 csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR);
2031 if (SF_ADDR_HI(addr) != 0)
2032 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT);
2033 /* Set RX completion queue type 2. */
2034 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2);
2035 csr_write_4(sc, SF_RXCQ_CTL_2, 0);
1282
2036
1283 /* Init RX DMA control. */
1284 SF_SETBIT(sc, SF_RXDMA_CTL, SF_RXDMA_REPORTBADPKTS);
2037 /*
2038	 * Init RX DMA control: use the default RxHighPriority
2039	 * threshold and the default RxBurstSize of
2040	 * 128 bytes.
2041 */
2042 SF_SETBIT(sc, SF_RXDMA_CTL,
2043 SF_RXDMA_REPORTBADPKTS |
2044 (SF_RXDMA_HIGHPRIO_THRESH << 8) |
2045 SF_RXDMA_BURST);
1285
1286 /* Init the RX buffer descriptor queue. */
2046
2047 /* Init the RX buffer descriptor queue. */
1287 csr_write_4(sc, SF_RXDQ_ADDR_Q1,
1288 vtophys(sc->sf_ldata->sf_rx_dlist_big));
1289 csr_write_4(sc, SF_RXDQ_CTL_1, (MCLBYTES << 16) | SF_DESCSPACE_16BYTES);
2048 addr = sc->sf_rdata.sf_rx_ring_paddr;
2049 csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr));
2050 csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr));
2051
2052 /* Set RX queue buffer length. */
2053 csr_write_4(sc, SF_RXDQ_CTL_1,
2054 ((MCLBYTES - sizeof(uint32_t)) << 16) |
2055 SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE);
2056
2057 if (SF_ADDR_HI(addr) != 0)
2058 SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR);
1290 csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1);
2059 csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1);
2060 csr_write_4(sc, SF_RXDQ_CTL_2, 0);
1291
1292 /* Init the TX completion queue */
2061
2062 /* Init the TX completion queue */
1293 csr_write_4(sc, SF_TXCQ_CTL,
1294 vtophys(sc->sf_ldata->sf_tx_clist) & SF_RXCQ_ADDR);
2063 addr = sc->sf_rdata.sf_tx_cring_paddr;
2064 csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR);
2065 if (SF_ADDR_HI(addr) != 0)
2066 SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT);
1295
1296 /* Init the TX buffer descriptor queue. */
2067
2068 /* Init the TX buffer descriptor queue. */
1297 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO,
1298 vtophys(sc->sf_ldata->sf_tx_dlist));
1299 SF_SETBIT(sc, SF_TX_FRAMCTL, SF_TXFRMCTL_CPLAFTERTX);
2069 addr = sc->sf_rdata.sf_tx_ring_paddr;
2070 csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr));
2071 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2072 csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr));
2073 csr_write_4(sc, SF_TX_FRAMCTL,
2074 SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh);
1300 csr_write_4(sc, SF_TXDQ_CTL,
2075 csr_write_4(sc, SF_TXDQ_CTL,
1301 SF_TXBUFDESC_TYPE0|SF_TXMINSPACE_128BYTES|SF_TXSKIPLEN_8BYTES);
1302 SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_NODMACMP);
2076 SF_TXDMA_HIPRIO_THRESH << 24 |
2077 SF_TXSKIPLEN_0BYTES << 16 |
2078 SF_TXDDMA_BURST << 8 |
2079 SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT);
2080 if (SF_ADDR_HI(addr) != 0)
2081 SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR);
1303
2082
2083 /* Set VLAN Type register. */
2084 csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN);
2085
2086 /* Set TxPause Timer. */
2087 csr_write_4(sc, SF_TXPAUSETIMER, 0xffff);
2088
1304 /* Enable autopadding of short TX frames. */
1305 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD);
2089 /* Enable autopadding of short TX frames. */
2090 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD);
2091 SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD);
2092 /* Make sure to reset MAC to take changes effect. */
2093 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2094 DELAY(1000);
2095 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
1306
2096
2097 /* Enable PCI bus master. */
2098 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN);
2099
2100 /* Load StarFire firmware. */
2101 sf_download_fw(sc);
2102
2103	/* Initialize interrupt moderation. */
2104 csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN |
2105 (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL));
2106
1307#ifdef DEVICE_POLLING
1308 /* Disable interrupts if we are polling. */
2107#ifdef DEVICE_POLLING
2108 /* Disable interrupts if we are polling. */
1309 if (ifp->if_capenable & IFCAP_POLLING)
2109 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1310 csr_write_4(sc, SF_IMR, 0x00000000);
1311 else
1312#endif
2110 csr_write_4(sc, SF_IMR, 0x00000000);
2111 else
2112#endif
1313
1314 /* Enable interrupts. */
1315 csr_write_4(sc, SF_IMR, SF_INTRS);
1316 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB);
1317
1318 /* Enable the RX and TX engines. */
2113 /* Enable interrupts. */
2114 csr_write_4(sc, SF_IMR, SF_INTRS);
2115 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB);
2116
2117 /* Enable the RX and TX engines. */
1319 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RX_ENB|SF_ETHCTL_RXDMA_ENB);
1320 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TX_ENB|SF_ETHCTL_TXDMA_ENB);
2118 csr_write_4(sc, SF_GEN_ETH_CTL,
2119 SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB |
2120 SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB);
1321
2121
1322 /*mii_mediachg(mii);*/
1323 sf_ifmedia_upd_locked(ifp);
2122 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2123 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2124 else
2125 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2126 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2127 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2128 else
2129 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
1324
2130
2131 sc->sf_link = 0;
2132 mii_mediachg(mii);
2133
1325 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1326 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1327
2134 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2135 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2136
1328 callout_reset(&sc->sf_stat_callout, hz, sf_stats_update, sc);
2137 callout_reset(&sc->sf_co, hz, sf_tick, sc);
1329}
1330
1331static int
2138}
2139
2140static int
1332sf_encap(sc, c, m_head)
1333 struct sf_softc *sc;
1334 struct sf_tx_bufdesc_type0 *c;
1335 struct mbuf *m_head;
2141sf_encap(struct sf_softc *sc, struct mbuf **m_head)
1336{
2142{
1337 int frag = 0;
1338 struct sf_frag *f = NULL;
2143 struct sf_txdesc *txd;
2144 struct sf_tx_rdesc *desc;
1339 struct mbuf *m;
2145 struct mbuf *m;
2146 bus_dmamap_t map;
2147 bus_dma_segment_t txsegs[SF_MAXTXSEGS];
2148 int error, i, nsegs, prod, si;
2149 int avail, nskip;
1340
2150
1341 m = m_head;
2151 SF_LOCK_ASSERT(sc);
1342
2152
1343 for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1344 if (m->m_len != 0) {
1345 if (frag == SF_MAXFRAGS)
1346 break;
1347 f = &c->sf_frags[frag];
1348 if (frag == 0)
1349 f->sf_pktlen = m_head->m_pkthdr.len;
1350 f->sf_fraglen = m->m_len;
1351 f->sf_addr = vtophys(mtod(m, vm_offset_t));
1352 frag++;
2153 m = *m_head;
2154 prod = sc->sf_cdata.sf_tx_prod;
2155 txd = &sc->sf_cdata.sf_txdesc[prod];
2156 map = txd->tx_dmamap;
2157 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map,
2158 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2159 if (error == EFBIG) {
2160 m = m_collapse(*m_head, M_DONTWAIT, SF_MAXTXSEGS);
2161 if (m == NULL) {
2162 m_freem(*m_head);
2163 *m_head = NULL;
2164 return (ENOBUFS);
1353 }
2165 }
2166 *m_head = m;
2167 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag,
2168 map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2169 if (error != 0) {
2170 m_freem(*m_head);
2171 *m_head = NULL;
2172 return (error);
2173 }
2174 } else if (error != 0)
2175 return (error);
2176 if (nsegs == 0) {
2177 m_freem(*m_head);
2178 *m_head = NULL;
2179 return (EIO);
1354 }
1355
2180 }
2181
1356 if (m != NULL) {
1357 struct mbuf *m_new = NULL;
1358
1359 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1360 if (m_new == NULL) {
1361 if_printf(sc->sf_ifp, "no memory for tx list\n");
1362 return(1);
2182 /* Check number of available descriptors. */
2183 avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt;
2184 if (avail < nsegs) {
2185 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2186 return (ENOBUFS);
2187 }
2188 nskip = 0;
2189 if (prod + nsegs >= SF_TX_DLIST_CNT) {
2190 nskip = SF_TX_DLIST_CNT - prod - 1;
2191 if (avail < nsegs + nskip) {
2192 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2193 return (ENOBUFS);
1363 }
2194 }
2195 }
1364
2196
1365 if (m_head->m_pkthdr.len > MHLEN) {
1366 MCLGET(m_new, M_DONTWAIT);
1367 if (!(m_new->m_flags & M_EXT)) {
1368 m_freem(m_new);
1369 if_printf(sc->sf_ifp, "no memory for tx list\n");
1370 return(1);
1371 }
1372 }
1373 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1374 mtod(m_new, caddr_t));
1375 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1376 m_freem(m_head);
1377 m_head = m_new;
1378 f = &c->sf_frags[0];
1379 f->sf_fraglen = f->sf_pktlen = m_head->m_pkthdr.len;
1380 f->sf_addr = vtophys(mtod(m_head, caddr_t));
1381 frag = 1;
2197 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE);
2198
2199 si = prod;
2200 for (i = 0; i < nsegs; i++) {
2201 desc = &sc->sf_rdata.sf_tx_ring[prod];
2202 desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID |
2203 (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN));
2204 desc->sf_tx_reserved = 0;
2205 desc->sf_addr = htole64(txsegs[i].ds_addr);
2206 if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) {
2207 /* Queue wraps! */
2208 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END);
2209 prod = 0;
2210 } else
2211 SF_INC(prod, SF_TX_DLIST_CNT);
1382 }
2212 }
2213 /* Update producer index. */
2214 sc->sf_cdata.sf_tx_prod = prod;
2215 sc->sf_cdata.sf_tx_cnt += nsegs + nskip;
1383
2216
1384 c->sf_mbuf = m_head;
1385 c->sf_id = SF_TX_BUFDESC_ID;
1386 c->sf_fragcnt = frag;
1387 c->sf_intr = 1;
1388 c->sf_caltcp = 0;
1389 c->sf_crcen = 1;
2217 desc = &sc->sf_rdata.sf_tx_ring[si];
2218	/* Check for a TCP/UDP checksum offload request. */
2219 if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0)
2220 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP);
2221 desc->sf_tx_ctrl |=
2222 htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16));
1390
2223
1391 return(0);
2224 txd->tx_dmamap = map;
2225 txd->tx_m = m;
2226 txd->ndesc = nsegs + nskip;
2227
2228 return (0);
1392}
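/*
 * When a frame's fragments would run past the end of the ring,
 * sf_encap() above stores the first fragment in the current slot with
 * SF_TX_DESC_END set, continues from slot 0, and charges the abandoned
 * tail slots (nskip) to the active count so sf_txeof()'s bookkeeping
 * stays consistent. A toy model of that accounting, with an 8-slot ring
 * standing in for SF_TX_DLIST_CNT:
 */
#include <stdio.h>

#define RING_CNT	8

int
main(void)
{
	int prod = 6, cnt = 2, nsegs = 4;	/* hypothetical state */
	int avail = (RING_CNT - 1) - cnt;
	int nskip = 0;

	if (prod + nsegs >= RING_CNT)
		nskip = RING_CNT - prod - 1;	/* tail slots abandoned */
	if (avail < nsegs + nskip) {
		printf("ring full, requeue the frame\n");
		return (0);
	}
	cnt += nsegs + nskip;	/* skipped slots count as in use, too */
	if (nskip > 0)
		prod = nsegs - 1; /* frag 0 at old prod, rest from slot 0 */
	else
		prod += nsegs;
	printf("prod=%d cnt=%d (skipped %d)\n", prod, cnt, nskip);
	return (0);
}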
1393
1394static void
2229}
2230
2231static void
1395sf_start(ifp)
1396 struct ifnet *ifp;
2232sf_start(struct ifnet *ifp)
1397{
1398 struct sf_softc *sc;
1399
1400 sc = ifp->if_softc;
1401 SF_LOCK(sc);
1402 sf_start_locked(ifp);
1403 SF_UNLOCK(sc);
1404}
1405
1406static void
2233{
2234 struct sf_softc *sc;
2235
2236 sc = ifp->if_softc;
2237 SF_LOCK(sc);
2238 sf_start_locked(ifp);
2239 SF_UNLOCK(sc);
2240}
2241
2242static void
1407sf_start_locked(ifp)
1408 struct ifnet *ifp;
2243sf_start_locked(struct ifnet *ifp)
1409{
1410 struct sf_softc *sc;
2244{
2245 struct sf_softc *sc;
1411 struct sf_tx_bufdesc_type0 *cur_tx = NULL;
1412 struct mbuf *m_head = NULL;
1413 int i, txprod;
2246 struct mbuf *m_head;
2247 int enq;
1414
1415 sc = ifp->if_softc;
1416 SF_LOCK_ASSERT(sc);
1417
2248
2249 sc = ifp->if_softc;
2250 SF_LOCK_ASSERT(sc);
2251
1418 if (!sc->sf_link && ifp->if_snd.ifq_len < 10)
2252 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2253 IFF_DRV_RUNNING || sc->sf_link == 0)
1419 return;
1420
2254 return;
2255
1421 if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
1422 return;
1423
1424 txprod = csr_read_4(sc, SF_TXDQ_PRODIDX);
1425 i = SF_IDX_HI(txprod) >> 4;
1426
1427 if (sc->sf_ldata->sf_tx_dlist[i].sf_mbuf != NULL) {
1428 if_printf(ifp, "TX ring full, resetting\n");
1429 sf_init_locked(sc);
1430 txprod = csr_read_4(sc, SF_TXDQ_PRODIDX);
1431 i = SF_IDX_HI(txprod) >> 4;
1432 }
1433
1434 while(sc->sf_ldata->sf_tx_dlist[i].sf_mbuf == NULL) {
1435 if (sc->sf_tx_cnt >= (SF_TX_DLIST_CNT - 5)) {
1436 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1437 cur_tx = NULL;
1438 break;
1439 }
2256 /*
2257	 * Since we don't know in advance when a descriptor wrap will
2258	 * occur, keep the number of active Tx descriptors at least
2259	 * SF_MAXTXSEGS below the ring size (SF_TX_DLIST_CNT).
2260 */
2261 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2262 sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) {
1440 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1441 if (m_head == NULL)
1442 break;
2263 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2264 if (m_head == NULL)
2265 break;
1443
1444 cur_tx = &sc->sf_ldata->sf_tx_dlist[i];
1445 if (sf_encap(sc, cur_tx, m_head)) {
2266 /*
2267 * Pack the data into the transmit ring. If we
2268 * don't have room, set the OACTIVE flag and wait
2269 * for the NIC to drain the ring.
2270 */
2271 if (sf_encap(sc, &m_head)) {
2272 if (m_head == NULL)
2273 break;
1446 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1447 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2274 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2275 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1448 cur_tx = NULL;
1449 break;
1450 }
1451
2276 break;
2277 }
2278
2279 enq++;
1452 /*
1453 * If there's a BPF listener, bounce a copy of this frame
1454 * to him.
1455 */
2280 /*
2281 * If there's a BPF listener, bounce a copy of this frame
2282 * to him.
2283 */
1456 BPF_MTAP(ifp, m_head);
1457
1458 SF_INC(i, SF_TX_DLIST_CNT);
1459 sc->sf_tx_cnt++;
1460 /*
1461		 * Don't let the TX DMA queue get too full.
1462 */
1463 if (sc->sf_tx_cnt > 64)
1464 break;
2284 ETHER_BPF_MTAP(ifp, m_head);
1465 }
1466
2285 }
2286
1467 if (cur_tx == NULL)
1468 return;
2287 if (enq > 0) {
2288 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
2289 sc->sf_cdata.sf_tx_ring_map,
2290 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2291 /* Kick transmit. */
2292 csr_write_4(sc, SF_TXDQ_PRODIDX,
2293 sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8));
1469
2294
1470 /* Transmit */
1471 csr_write_4(sc, SF_TXDQ_PRODIDX,
1472 (txprod & ~SF_TXDQ_PRODIDX_HIPRIO) |
1473 ((i << 20) & 0xFFFF0000));
1474
1475 ifp->if_timer = 5;
2295 /* Set a timeout in case the chip goes out to lunch. */
2296 sc->sf_watchdog_timer = 5;
2297 }
1476}
1477
1478static void
2298}
2299
2300static void
1479sf_stop(sc)
1480 struct sf_softc *sc;
2301sf_stop(struct sf_softc *sc)
1481{
2302{
1482 int i;
2303 struct sf_txdesc *txd;
2304 struct sf_rxdesc *rxd;
1483 struct ifnet *ifp;
2305 struct ifnet *ifp;
2306 int i;
1484
1485 SF_LOCK_ASSERT(sc);
1486
1487 ifp = sc->sf_ifp;
1488
2307
2308 SF_LOCK_ASSERT(sc);
2309
2310 ifp = sc->sf_ifp;
2311
1489 callout_stop(&sc->sf_stat_callout);
2312 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2313 sc->sf_link = 0;
2314 callout_stop(&sc->sf_co);
2315 sc->sf_watchdog_timer = 0;
1490
2316
2317	/* Reading the ISR register clears all interrupts. */
2318 csr_read_4(sc, SF_ISR);
2319 /* Disable further interrupts. */
2320 csr_write_4(sc, SF_IMR, 0);
2321
2322	/* Disable the Tx/Rx engines. */
1491 csr_write_4(sc, SF_GEN_ETH_CTL, 0);
2323 csr_write_4(sc, SF_GEN_ETH_CTL, 0);
2324
1492 csr_write_4(sc, SF_CQ_CONSIDX, 0);
1493 csr_write_4(sc, SF_CQ_PRODIDX, 0);
1494 csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0);
1495 csr_write_4(sc, SF_RXDQ_CTL_1, 0);
1496 csr_write_4(sc, SF_RXDQ_PTR_Q1, 0);
1497 csr_write_4(sc, SF_TXCQ_CTL, 0);
1498 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
1499 csr_write_4(sc, SF_TXDQ_CTL, 0);
2325 csr_write_4(sc, SF_CQ_CONSIDX, 0);
2326 csr_write_4(sc, SF_CQ_PRODIDX, 0);
2327 csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0);
2328 csr_write_4(sc, SF_RXDQ_CTL_1, 0);
2329 csr_write_4(sc, SF_RXDQ_PTR_Q1, 0);
2330 csr_write_4(sc, SF_TXCQ_CTL, 0);
2331 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2332 csr_write_4(sc, SF_TXDQ_CTL, 0);
1500 sf_reset(sc);
1501
2333
1502 sc->sf_link = 0;
1503
2334 /*
2335 * Free RX and TX mbufs still in the queues.
2336 */
1504 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
2337 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1505 if (sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf != NULL) {
1506 m_freem(sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf);
1507 sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf = NULL;
2338 rxd = &sc->sf_cdata.sf_rxdesc[i];
2339 if (rxd->rx_m != NULL) {
2340 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag,
2341 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2342 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag,
2343 rxd->rx_dmamap);
2344 m_freem(rxd->rx_m);
2345 rxd->rx_m = NULL;
1508 }
2346 }
1509 }
1510
2347 }
1511 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
2348 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1512 if (sc->sf_ldata->sf_tx_dlist[i].sf_mbuf != NULL) {
1513 m_freem(sc->sf_ldata->sf_tx_dlist[i].sf_mbuf);
1514 sc->sf_ldata->sf_tx_dlist[i].sf_mbuf = NULL;
2349 txd = &sc->sf_cdata.sf_txdesc[i];
2350 if (txd->tx_m != NULL) {
2351 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
2352 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2353 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
2354 txd->tx_dmamap);
2355 m_freem(txd->tx_m);
2356 txd->tx_m = NULL;
2357 txd->ndesc = 0;
1515 }
2358 }
1516 }
2359 }
2360}
1517
2361
1518 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
2362static void
2363sf_tick(void *xsc)
2364{
2365 struct sf_softc *sc;
2366 struct mii_data *mii;
2367
2368 sc = xsc;
2369 SF_LOCK_ASSERT(sc);
2370 mii = device_get_softc(sc->sf_miibus);
2371 mii_tick(mii);
2372 sf_stats_update(sc);
2373 sf_watchdog(sc);
2374 callout_reset(&sc->sf_co, hz, sf_tick, sc);
1519}
1520
1521/*
1522 * Note: it is important that this function not be interrupted. We
1523 * use a two-stage register access scheme: if we are interrupted in
1524 * between setting the indirect address register and reading from the
1525 * indirect data register, the contents of the address register could
1526 * be changed out from under us.
1527 */
1528static void
2375}
2376
2377/*
2378 * Note: it is important that this function not be interrupted. We
2379 * use a two-stage register access scheme: if we are interrupted in
2380 * between setting the indirect address register and reading from the
2381 * indirect data register, the contents of the address register could
2382 * be changed out from under us.
2383 */
2384static void
1529sf_stats_update(xsc)
1530 void *xsc;
2385sf_stats_update(struct sf_softc *sc)
1531{
2386{
1532 struct sf_softc *sc;
1533 struct ifnet *ifp;
2387 struct ifnet *ifp;
1534 struct mii_data *mii;
1535 struct sf_stats stats;
1536 u_int32_t *ptr;
2388 struct sf_stats now, *stats, *nstats;
1537 int i;
1538
2389 int i;
2390
1539 sc = xsc;
1540 SF_LOCK_ASSERT(sc);
2391 SF_LOCK_ASSERT(sc);
2392
1541 ifp = sc->sf_ifp;
2393 ifp = sc->sf_ifp;
1542 mii = device_get_softc(sc->sf_miibus);
2394 stats = &now;
1543
2395
	stats->sf_tx_frames =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES);
	stats->sf_tx_single_colls =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL);
	stats->sf_tx_multi_colls =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL);
	stats->sf_tx_crcerrs =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS);
	stats->sf_tx_bytes =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES);
	stats->sf_tx_deferred =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED);
	stats->sf_tx_late_colls =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL);
	stats->sf_tx_pause_frames =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE);
	stats->sf_tx_control_frames =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME);
	stats->sf_tx_excess_colls =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL);
	stats->sf_tx_excess_defer =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF);
	stats->sf_tx_mcast_frames =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI);
	stats->sf_tx_bcast_frames =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST);
	stats->sf_tx_frames_lost =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST);
	stats->sf_rx_frames =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES);
	stats->sf_rx_crcerrs =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS);
	stats->sf_rx_alignerrs =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS);
	stats->sf_rx_bytes =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES);
	stats->sf_rx_pause_frames =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE);
	stats->sf_rx_control_frames =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME);
	stats->sf_rx_unsup_control_frames =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME);
	stats->sf_rx_giants =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_GIANTS);
	stats->sf_rx_runts =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS);
	stats->sf_rx_jabbererrs =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER);
	stats->sf_rx_fragments =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS);
	stats->sf_rx_pkts_64 =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64);
	stats->sf_rx_pkts_65_127 =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127);
	stats->sf_rx_pkts_128_255 =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255);
	stats->sf_rx_pkts_256_511 =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511);
	stats->sf_rx_pkts_512_1023 =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023);
	stats->sf_rx_pkts_1024_1518 =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518);
	stats->sf_rx_frames_lost =
	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST);
	/* Lower 16 bits are valid. */
	stats->sf_tx_underruns =
	    (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff);

	/* Empty stats counter registers. */
	for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
		csr_write_4(sc, i, 0);
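
	/*
	 * The registers were just cleared above, so each snapshot holds
	 * only the counts accumulated since the previous tick; fold the
	 * deltas into the interface-level counters.
	 */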
	ifp->if_opackets += (u_long)stats->sf_tx_frames;

	ifp->if_collisions += (u_long)stats->sf_tx_single_colls +
	    (u_long)stats->sf_tx_multi_colls;

	ifp->if_oerrors += (u_long)stats->sf_tx_excess_colls +
	    (u_long)stats->sf_tx_excess_defer +
	    (u_long)stats->sf_tx_frames_lost;

	ifp->if_ipackets += (u_long)stats->sf_rx_frames;

	ifp->if_ierrors += (u_long)stats->sf_rx_crcerrs +
	    (u_long)stats->sf_rx_alignerrs +
	    (u_long)stats->sf_rx_giants +
	    (u_long)stats->sf_rx_runts +
	    (u_long)stats->sf_rx_jabbererrs +
	    (u_long)stats->sf_rx_frames_lost;
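
	/* Accumulate the deltas into the driver's running totals. */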
	nstats = &sc->sf_statistics;

	nstats->sf_tx_frames += stats->sf_tx_frames;
	nstats->sf_tx_single_colls += stats->sf_tx_single_colls;
	nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls;
	nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs;
	nstats->sf_tx_bytes += stats->sf_tx_bytes;
	nstats->sf_tx_deferred += stats->sf_tx_deferred;
	nstats->sf_tx_late_colls += stats->sf_tx_late_colls;
	nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames;
	nstats->sf_tx_control_frames += stats->sf_tx_control_frames;
	nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls;
	nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer;
	nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames;
	nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames;
	nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost;
	nstats->sf_rx_frames += stats->sf_rx_frames;
	nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs;
	nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs;
	nstats->sf_rx_bytes += stats->sf_rx_bytes;
	nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames;
	nstats->sf_rx_control_frames += stats->sf_rx_control_frames;
	nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames;
	nstats->sf_rx_giants += stats->sf_rx_giants;
	nstats->sf_rx_runts += stats->sf_rx_runts;
	nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs;
	nstats->sf_rx_fragments += stats->sf_rx_fragments;
	nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64;
	nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127;
	nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255;
	nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511;
	nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023;
	nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518;
	nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost;
	nstats->sf_tx_underruns += stats->sf_tx_underruns;
}
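
/*
 * Transmit watchdog. This appears intended to run from the driver's
 * periodic callout rather than via if_watchdog: it returns
 * immediately unless sf_watchdog_timer has been armed and has just
 * counted down to zero.
 */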
static void
sf_watchdog(struct sf_softc *sc)
{
	struct ifnet *ifp;

	SF_LOCK_ASSERT(sc);

	if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer)
		return;

	ifp = sc->sf_ifp;

	ifp->if_oerrors++;
	if (sc->sf_link == 0) {
		if (bootverbose)
			if_printf(sc->sf_ifp, "watchdog timeout "
			    "(missed link)\n");
	} else
		if_printf(ifp, "watchdog timeout, %d Tx descs are active\n",
		    sc->sf_cdata.sf_tx_cnt);

	sf_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		sf_start_locked(ifp);
}
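
/*
 * Device shutdown routine: quiesce the controller so it stops DMA
 * before the system goes down.
 */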
static int
sf_shutdown(device_t dev)
{
	struct sf_softc *sc;

	sc = device_get_softc(dev);

	SF_LOCK(sc);
	sf_stop(sc);
	SF_UNLOCK(sc);

	return (0);
}
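
/*
 * Suspend handler: stop the controller and flag the softc as
 * suspended before handing the rest of the device tree to
 * bus_generic_suspend().
 */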
static int
sf_suspend(device_t dev)
{
	struct sf_softc *sc;

	sc = device_get_softc(dev);

	SF_LOCK(sc);
	sf_stop(sc);
	sc->sf_suspended = 1;
	bus_generic_suspend(dev);
	SF_UNLOCK(sc);

	return (0);
}
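
/*
 * Resume handler: reinitialize the controller if the interface was
 * up at suspend time, then clear the suspended flag.
 */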
static int
sf_resume(device_t dev)
{
	struct sf_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	SF_LOCK(sc);
	bus_generic_resume(dev);
	ifp = sc->sf_ifp;
	if ((ifp->if_flags & IFF_UP) != 0)
		sf_init_locked(sc);

	sc->sf_suspended = 0;
	SF_UNLOCK(sc);

	return (0);
}
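
/*
 * Sysctl handler that dumps the accumulated MAC statistics to the
 * console. Writing 1 to the node triggers the dump; any other value
 * is ignored.
 */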
static int
sf_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct sf_softc *sc;
	struct sf_stats *stats;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result != 1)
		return (error);

	sc = (struct sf_softc *)arg1;
	stats = &sc->sf_statistics;

	printf("%s statistics:\n", device_get_nameunit(sc->sf_dev));
	printf("Transmit good frames : %ju\n",
	    (uintmax_t)stats->sf_tx_frames);
	printf("Transmit good octets : %ju\n",
	    (uintmax_t)stats->sf_tx_bytes);
	printf("Transmit single collisions : %u\n",
	    stats->sf_tx_single_colls);
	printf("Transmit multiple collisions : %u\n",
	    stats->sf_tx_multi_colls);
	printf("Transmit late collisions : %u\n",
	    stats->sf_tx_late_colls);
	printf("Transmit abort due to excessive collisions : %u\n",
	    stats->sf_tx_excess_colls);
	printf("Transmit CRC errors : %u\n",
	    stats->sf_tx_crcerrs);
	printf("Transmit deferrals : %u\n",
	    stats->sf_tx_deferred);
	printf("Transmit abort due to excessive deferrals : %u\n",
	    stats->sf_tx_excess_defer);
	printf("Transmit pause control frames : %u\n",
	    stats->sf_tx_pause_frames);
	printf("Transmit control frames : %u\n",
	    stats->sf_tx_control_frames);
	printf("Transmit good multicast frames : %u\n",
	    stats->sf_tx_mcast_frames);
	printf("Transmit good broadcast frames : %u\n",
	    stats->sf_tx_bcast_frames);
	printf("Transmit frames lost due to internal transmit errors : %u\n",
	    stats->sf_tx_frames_lost);
	printf("Transmit FIFO underflows : %u\n",
	    stats->sf_tx_underruns);
	printf("Transmit GFP stalls : %u\n", stats->sf_tx_gfp_stall);
	printf("Receive good frames : %ju\n",
	    (uintmax_t)stats->sf_rx_frames);
	printf("Receive good octets : %ju\n",
	    (uintmax_t)stats->sf_rx_bytes);
	printf("Receive CRC errors : %u\n",
	    stats->sf_rx_crcerrs);
	printf("Receive alignment errors : %u\n",
	    stats->sf_rx_alignerrs);
	printf("Receive pause frames : %u\n",
	    stats->sf_rx_pause_frames);
	printf("Receive control frames : %u\n",
	    stats->sf_rx_control_frames);
	printf("Receive control frames with unsupported opcode : %u\n",
	    stats->sf_rx_unsup_control_frames);
	printf("Receive frames too long : %u\n",
	    stats->sf_rx_giants);
	printf("Receive frames too short : %u\n",
	    stats->sf_rx_runts);
	printf("Receive frames with jabber errors : %u\n",
	    stats->sf_rx_jabbererrs);
	printf("Receive frame fragments : %u\n",
	    stats->sf_rx_fragments);
	printf("Receive packets 64 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_64);
	printf("Receive packets 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_65_127);
	printf("Receive packets 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_128_255);
	printf("Receive packets 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_256_511);
	printf("Receive packets 512 to 1023 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_512_1023);
	printf("Receive packets 1024 to 1518 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_1024_1518);
	printf("Receive frames lost due to internal receive errors : %u\n",
	    stats->sf_rx_frames_lost);
	printf("Receive GFP stalls : %u\n", stats->sf_rx_gfp_stall);

	return (error);
}
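
/*
 * Generic range-checked integer sysctl handler: updates that fall
 * outside [low, high] are rejected with EINVAL.
 */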
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}
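
/*
 * Restrict updates to the interrupt moderation tunable to the
 * SF_IM_MIN..SF_IM_MAX range.
 */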
static int
sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN, SF_IM_MAX));
}