--- if_stge.c	(revision 229767)
+++ if_stge.c	(revision 242625)
1/* $NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $ */
2
3/*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32/*
33 * Device driver for the Sundance Tech. TC9021 10/100/1000
34 * Ethernet controller.
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: head/sys/dev/stge/if_stge.c 229767 2012-01-07 09:41:57Z kevlo $");
38__FBSDID("$FreeBSD: head/sys/dev/stge/if_stge.c 242625 2012-11-05 19:16:27Z dim $");
39
40#ifdef HAVE_KERNEL_OPTION_HEADERS
41#include "opt_device_polling.h"
42#endif
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/endian.h>
47#include <sys/mbuf.h>
48#include <sys/malloc.h>
49#include <sys/kernel.h>
50#include <sys/module.h>
51#include <sys/socket.h>
52#include <sys/sockio.h>
53#include <sys/sysctl.h>
54#include <sys/taskqueue.h>
55
56#include <net/bpf.h>
57#include <net/ethernet.h>
58#include <net/if.h>
59#include <net/if_dl.h>
60#include <net/if_media.h>
61#include <net/if_types.h>
62#include <net/if_vlan_var.h>
63
64#include <machine/bus.h>
65#include <machine/resource.h>
66#include <sys/bus.h>
67#include <sys/rman.h>
68
69#include <dev/mii/mii.h>
70#include <dev/mii/mii_bitbang.h>
71#include <dev/mii/miivar.h>
72
73#include <dev/pci/pcireg.h>
74#include <dev/pci/pcivar.h>
75
76#include <dev/stge/if_stgereg.h>
77
78#define STGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
79
80MODULE_DEPEND(stge, pci, 1, 1, 1);
81MODULE_DEPEND(stge, ether, 1, 1, 1);
82MODULE_DEPEND(stge, miibus, 1, 1, 1);
83
84/* "device miibus" required. See GENERIC if you get errors here. */
85#include "miibus_if.h"
86
87/*
88 * Devices supported by this driver.
89 */
90static const struct stge_product {
91 uint16_t stge_vendorid;
92 uint16_t stge_deviceid;
93 const char *stge_name;
94} const stge_products[] = {
94} stge_products[] = {
95 { VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST1023,
96 "Sundance ST-1023 Gigabit Ethernet" },
97
98 { VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST2021,
99 "Sundance ST-2021 Gigabit Ethernet" },
100
101 { VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021,
102 "Tamarack TC9021 Gigabit Ethernet" },
103
104 { VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021_ALT,
105 "Tamarack TC9021 Gigabit Ethernet" },
106
107 /*
108 * The Sundance sample boards use the Sundance vendor ID,
109 * but the Tamarack product ID.
110 */
111 { VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021,
112 "Sundance TC9021 Gigabit Ethernet" },
113
114 { VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021_ALT,
115 "Sundance TC9021 Gigabit Ethernet" },
116
117 { VENDOR_DLINK, DEVICEID_DLINK_DL4000,
118 "D-Link DL-4000 Gigabit Ethernet" },
119
120 { VENDOR_ANTARES, DEVICEID_ANTARES_TC9021,
121 "Antares Gigabit Ethernet" }
122};
123
124static int stge_probe(device_t);
125static int stge_attach(device_t);
126static int stge_detach(device_t);
127static int stge_shutdown(device_t);
128static int stge_suspend(device_t);
129static int stge_resume(device_t);
130
131static int stge_encap(struct stge_softc *, struct mbuf **);
132static void stge_start(struct ifnet *);
133static void stge_start_locked(struct ifnet *);
134static void stge_watchdog(struct stge_softc *);
135static int stge_ioctl(struct ifnet *, u_long, caddr_t);
136static void stge_init(void *);
137static void stge_init_locked(struct stge_softc *);
138static void stge_vlan_setup(struct stge_softc *);
139static void stge_stop(struct stge_softc *);
140static void stge_start_tx(struct stge_softc *);
141static void stge_start_rx(struct stge_softc *);
142static void stge_stop_tx(struct stge_softc *);
143static void stge_stop_rx(struct stge_softc *);
144
145static void stge_reset(struct stge_softc *, uint32_t);
146static int stge_eeprom_wait(struct stge_softc *);
147static void stge_read_eeprom(struct stge_softc *, int, uint16_t *);
148static void stge_tick(void *);
149static void stge_stats_update(struct stge_softc *);
150static void stge_set_filter(struct stge_softc *);
151static void stge_set_multi(struct stge_softc *);
152
153static void stge_link_task(void *, int);
154static void stge_intr(void *);
155static __inline int stge_tx_error(struct stge_softc *);
156static void stge_txeof(struct stge_softc *);
157static int stge_rxeof(struct stge_softc *);
158static __inline void stge_discard_rxbuf(struct stge_softc *, int);
159static int stge_newbuf(struct stge_softc *, int);
160#ifndef __NO_STRICT_ALIGNMENT
161static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
162#endif
163
164static int stge_miibus_readreg(device_t, int, int);
165static int stge_miibus_writereg(device_t, int, int, int);
166static void stge_miibus_statchg(device_t);
167static int stge_mediachange(struct ifnet *);
168static void stge_mediastatus(struct ifnet *, struct ifmediareq *);
169
170static void stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
171static int stge_dma_alloc(struct stge_softc *);
172static void stge_dma_free(struct stge_softc *);
173static void stge_dma_wait(struct stge_softc *);
174static void stge_init_tx_ring(struct stge_softc *);
175static int stge_init_rx_ring(struct stge_softc *);
176#ifdef DEVICE_POLLING
177static int stge_poll(struct ifnet *, enum poll_cmd, int);
178#endif
179
180static void stge_setwol(struct stge_softc *);
181static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
182static int sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
183static int sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
184
185/*
186 * MII bit-bang glue
187 */
188static uint32_t stge_mii_bitbang_read(device_t);
189static void stge_mii_bitbang_write(device_t, uint32_t);
190
191static const struct mii_bitbang_ops stge_mii_bitbang_ops = {
192 stge_mii_bitbang_read,
193 stge_mii_bitbang_write,
194 {
195 PC_MgmtData, /* MII_BIT_MDO */
196 PC_MgmtData, /* MII_BIT_MDI */
197 PC_MgmtClk, /* MII_BIT_MDC */
198 PC_MgmtDir, /* MII_BIT_DIR_HOST_PHY */
199 0, /* MII_BIT_DIR_PHY_HOST */
200 }
201};
202
203static device_method_t stge_methods[] = {
204 /* Device interface */
205 DEVMETHOD(device_probe, stge_probe),
206 DEVMETHOD(device_attach, stge_attach),
207 DEVMETHOD(device_detach, stge_detach),
208 DEVMETHOD(device_shutdown, stge_shutdown),
209 DEVMETHOD(device_suspend, stge_suspend),
210 DEVMETHOD(device_resume, stge_resume),
211
212 /* MII interface */
213 DEVMETHOD(miibus_readreg, stge_miibus_readreg),
214 DEVMETHOD(miibus_writereg, stge_miibus_writereg),
215 DEVMETHOD(miibus_statchg, stge_miibus_statchg),
216
217 DEVMETHOD_END
218};
219
220static driver_t stge_driver = {
221 "stge",
222 stge_methods,
223 sizeof(struct stge_softc)
224};
225
226static devclass_t stge_devclass;
227
228DRIVER_MODULE(stge, pci, stge_driver, stge_devclass, 0, 0);
229DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);
230
231static struct resource_spec stge_res_spec_io[] = {
232 { SYS_RES_IOPORT, PCIR_BAR(0), RF_ACTIVE },
233 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
234 { -1, 0, 0 }
235};
236
237static struct resource_spec stge_res_spec_mem[] = {
238 { SYS_RES_MEMORY, PCIR_BAR(1), RF_ACTIVE },
239 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
240 { -1, 0, 0 }
241};
242
243/*
244 * stge_mii_bitbang_read: [mii bit-bang interface function]
245 *
246 * Read the MII serial port for the MII bit-bang module.
247 */
248static uint32_t
249stge_mii_bitbang_read(device_t dev)
250{
251 struct stge_softc *sc;
252 uint32_t val;
253
254 sc = device_get_softc(dev);
255
256 val = CSR_READ_1(sc, STGE_PhyCtrl);
257 CSR_BARRIER(sc, STGE_PhyCtrl, 1,
258 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
259 return (val);
260}
261
262/*
 263 * stge_mii_bitbang_write: [mii bit-bang interface function]
264 *
265 * Write the MII serial port for the MII bit-bang module.
266 */
267static void
268stge_mii_bitbang_write(device_t dev, uint32_t val)
269{
270 struct stge_softc *sc;
271
272 sc = device_get_softc(dev);
273
274 CSR_WRITE_1(sc, STGE_PhyCtrl, val);
275 CSR_BARRIER(sc, STGE_PhyCtrl, 1,
276 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
277}
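
/*
 * These two accessors are all the glue that mii_bitbang(4) needs: a
 * read of PHY register `reg' on PHY `phy' reduces, roughly, to
 *
 *	val = mii_bitbang_readreg(dev, &stge_mii_bitbang_ops, phy, reg);
 *
 * which clocks the MDIO preamble, opcode, addresses and data through
 * STGE_PhyCtrl one bit at a time, using the bit assignments in
 * stge_mii_bitbang_ops above.
 */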
278
279/*
 280 * stge_miibus_readreg: [mii interface function]
281 *
282 * Read a PHY register on the MII of the TC9021.
283 */
284static int
285stge_miibus_readreg(device_t dev, int phy, int reg)
286{
287 struct stge_softc *sc;
288 int error, val;
289
290 sc = device_get_softc(dev);
291
292 if (reg == STGE_PhyCtrl) {
 293 /* XXX allow the ip1000phy driver to read the STGE_PhyCtrl register. */
294 STGE_MII_LOCK(sc);
295 error = CSR_READ_1(sc, STGE_PhyCtrl);
296 STGE_MII_UNLOCK(sc);
297 return (error);
298 }
299
300 STGE_MII_LOCK(sc);
301 val = mii_bitbang_readreg(dev, &stge_mii_bitbang_ops, phy, reg);
302 STGE_MII_UNLOCK(sc);
303 return (val);
304}
305
306/*
307 * stge_miibus_writereg: [mii interface function]
308 *
309 * Write a PHY register on the MII of the TC9021.
310 */
311static int
312stge_miibus_writereg(device_t dev, int phy, int reg, int val)
313{
314 struct stge_softc *sc;
315
316 sc = device_get_softc(dev);
317
318 STGE_MII_LOCK(sc);
319 mii_bitbang_writereg(dev, &stge_mii_bitbang_ops, phy, reg, val);
320 STGE_MII_UNLOCK(sc);
321 return (0);
322}
323
324/*
325 * stge_miibus_statchg: [mii interface function]
326 *
327 * Callback from MII layer when media changes.
328 */
329static void
330stge_miibus_statchg(device_t dev)
331{
332 struct stge_softc *sc;
333
334 sc = device_get_softc(dev);
335 taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task);
336}
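
/*
 * The statchg callback can fire while the softc mutex is already held
 * (mii_tick() runs from stge_tick() under STGE_LOCK), so the link-state
 * work is deferred to stge_link_task() on taskqueue_swi rather than
 * done here.
 */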
337
338/*
339 * stge_mediastatus: [ifmedia interface function]
340 *
341 * Get the current interface media status.
342 */
343static void
344stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
345{
346 struct stge_softc *sc;
347 struct mii_data *mii;
348
349 sc = ifp->if_softc;
350 mii = device_get_softc(sc->sc_miibus);
351
352 mii_pollstat(mii);
353 ifmr->ifm_status = mii->mii_media_status;
354 ifmr->ifm_active = mii->mii_media_active;
355}
356
357/*
358 * stge_mediachange: [ifmedia interface function]
359 *
360 * Set hardware to newly-selected media.
361 */
362static int
363stge_mediachange(struct ifnet *ifp)
364{
365 struct stge_softc *sc;
366 struct mii_data *mii;
367
368 sc = ifp->if_softc;
369 mii = device_get_softc(sc->sc_miibus);
370 mii_mediachg(mii);
371
372 return (0);
373}
374
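/*
 * stge_eeprom_wait:
 *
 *	Poll the EEPROM controller, in 1ms steps, until it is no longer
 *	busy.  Returns zero when ready and non-zero on timeout.
 */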
375static int
376stge_eeprom_wait(struct stge_softc *sc)
377{
378 int i;
379
380 for (i = 0; i < STGE_TIMEOUT; i++) {
381 DELAY(1000);
382 if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
383 return (0);
384 }
385 return (1);
386}
387
388/*
389 * stge_read_eeprom:
390 *
391 * Read data from the serial EEPROM.
392 */
393static void
394stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
395{
396
397 if (stge_eeprom_wait(sc))
398 device_printf(sc->sc_dev, "EEPROM failed to come ready\n");
399
400 CSR_WRITE_2(sc, STGE_EepromCtrl,
401 EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
402 if (stge_eeprom_wait(sc))
403 device_printf(sc->sc_dev, "EEPROM read timed out\n");
404 *data = CSR_READ_2(sc, STGE_EepromData);
405}
406
407
408static int
409stge_probe(device_t dev)
410{
411 const struct stge_product *sp;
412 int i;
413 uint16_t vendor, devid;
414
415 vendor = pci_get_vendor(dev);
416 devid = pci_get_device(dev);
417 sp = stge_products;
418 for (i = 0; i < sizeof(stge_products)/sizeof(stge_products[0]);
419 i++, sp++) {
420 if (vendor == sp->stge_vendorid &&
421 devid == sp->stge_deviceid) {
422 device_set_desc(dev, sp->stge_name);
423 return (BUS_PROBE_DEFAULT);
424 }
425 }
426
427 return (ENXIO);
428}
429
430static int
431stge_attach(device_t dev)
432{
433 struct stge_softc *sc;
434 struct ifnet *ifp;
435 uint8_t enaddr[ETHER_ADDR_LEN];
436 int error, flags, i;
437 uint16_t cmd;
438 uint32_t val;
439
440 error = 0;
441 sc = device_get_softc(dev);
442 sc->sc_dev = dev;
443
444 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
445 MTX_DEF);
446 mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF);
447 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
448 TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc);
449
450 /*
451 * Map the device.
452 */
453 pci_enable_busmaster(dev);
454 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
455 val = pci_read_config(dev, PCIR_BAR(1), 4);
456 if ((val & 0x01) != 0)
457 sc->sc_spec = stge_res_spec_mem;
458 else {
459 val = pci_read_config(dev, PCIR_BAR(0), 4);
460 if ((val & 0x01) == 0) {
461 device_printf(sc->sc_dev, "couldn't locate IO BAR\n");
462 error = ENXIO;
463 goto fail;
464 }
465 sc->sc_spec = stge_res_spec_io;
466 }
467 error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res);
468 if (error != 0) {
469 device_printf(dev, "couldn't allocate %s resources\n",
470 sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O");
471 goto fail;
472 }
473 sc->sc_rev = pci_get_revid(dev);
474
475 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
476 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
477 "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
478 sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");
479
480 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
481 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
482 "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
483 sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");
484
485 /* Pull in device tunables. */
486 sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
487 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
488 "rxint_nframe", &sc->sc_rxint_nframe);
489 if (error == 0) {
490 if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN ||
491 sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) {
492 device_printf(dev, "rxint_nframe value out of range; "
493 "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT);
494 sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
495 }
496 }
497
498 sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
499 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
500 "rxint_dmawait", &sc->sc_rxint_dmawait);
501 if (error == 0) {
502 if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN ||
503 sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) {
504 device_printf(dev, "rxint_dmawait value out of range; "
505 "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT);
506 sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
507 }
508 }
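
	/*
	 * Both tunables come from device hints, which resource_int_value(9)
	 * looks up by driver name and unit; e.g. in /boot/device.hints or
	 * loader.conf for unit 0 (the values here are only examples):
	 *
	 *	hint.stge.0.rxint_nframe="8"
	 *	hint.stge.0.rxint_dmawait="30"
	 *
	 * Out-of-range values fall back to the defaults above.
	 */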
509
 510	if ((error = stge_dma_alloc(sc)) != 0)
511 goto fail;
512
513 /*
514 * Determine if we're copper or fiber. It affects how we
515 * reset the card.
516 */
517 if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
518 sc->sc_usefiber = 1;
519 else
520 sc->sc_usefiber = 0;
521
522 /* Load LED configuration from EEPROM. */
523 stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);
524
525 /*
526 * Reset the chip to a known state.
527 */
528 STGE_LOCK(sc);
529 stge_reset(sc, STGE_RESET_FULL);
530 STGE_UNLOCK(sc);
531
532 /*
533 * Reading the station address from the EEPROM doesn't seem
534 * to work, at least on my sample boards. Instead, since
535 * the reset sequence does AutoInit, read it from the station
 536	 * address registers. For the Sundance ST-1023, it can only be
 537	 * read from the EEPROM.
538 */
539 if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
540 uint16_t v;
541
542 v = CSR_READ_2(sc, STGE_StationAddress0);
543 enaddr[0] = v & 0xff;
544 enaddr[1] = v >> 8;
545 v = CSR_READ_2(sc, STGE_StationAddress1);
546 enaddr[2] = v & 0xff;
547 enaddr[3] = v >> 8;
548 v = CSR_READ_2(sc, STGE_StationAddress2);
549 enaddr[4] = v & 0xff;
550 enaddr[5] = v >> 8;
551 sc->sc_stge1023 = 0;
552 } else {
553 uint16_t myaddr[ETHER_ADDR_LEN / 2];
 554		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
555 stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
556 &myaddr[i]);
557 myaddr[i] = le16toh(myaddr[i]);
558 }
559 bcopy(myaddr, enaddr, sizeof(enaddr));
560 sc->sc_stge1023 = 1;
561 }
562
563 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
564 if (ifp == NULL) {
565 device_printf(sc->sc_dev, "failed to if_alloc()\n");
566 error = ENXIO;
567 goto fail;
568 }
569
570 ifp->if_softc = sc;
571 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
572 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
573 ifp->if_ioctl = stge_ioctl;
574 ifp->if_start = stge_start;
575 ifp->if_init = stge_init;
576 ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1;
577 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
578 IFQ_SET_READY(&ifp->if_snd);
 579	/* Revision B3 and earlier chips have a checksum bug. */
580 if (sc->sc_rev >= 0x0c) {
581 ifp->if_hwassist = STGE_CSUM_FEATURES;
582 ifp->if_capabilities = IFCAP_HWCSUM;
583 } else {
584 ifp->if_hwassist = 0;
585 ifp->if_capabilities = 0;
586 }
587 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
588 ifp->if_capenable = ifp->if_capabilities;
589
590 /*
591 * Read some important bits from the PhyCtrl register.
592 */
593 sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
594 (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);
595
596 /* Set up MII bus. */
597 flags = MIIF_DOPAUSE;
598 if (sc->sc_rev >= 0x40 && sc->sc_rev <= 0x4e)
599 flags |= MIIF_MACPRIV0;
600 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, stge_mediachange,
601 stge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
602 flags);
603 if (error != 0) {
604 device_printf(sc->sc_dev, "attaching PHYs failed\n");
605 goto fail;
606 }
607
608 ether_ifattach(ifp, enaddr);
609
610 /* VLAN capability setup */
611 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
612 if (sc->sc_rev >= 0x0c)
613 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
614 ifp->if_capenable = ifp->if_capabilities;
615#ifdef DEVICE_POLLING
616 ifp->if_capabilities |= IFCAP_POLLING;
617#endif
618 /*
619 * Tell the upper layer(s) we support long frames.
620 * Must appear after the call to ether_ifattach() because
621 * ether_ifattach() sets ifi_hdrlen to the default value.
622 */
623 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
624
625 /*
626 * The manual recommends disabling early transmit, so we
 627	 * do. It's disabled anyway when using IP checksumming,
 628	 * since the entire packet must be in the FIFO in order
 629	 * for the chip to perform the checksum.
630 */
631 sc->sc_txthresh = 0x0fff;
632
633 /*
634 * Disable MWI if the PCI layer tells us to.
635 */
636 sc->sc_DMACtrl = 0;
637 if ((cmd & PCIM_CMD_MWRICEN) == 0)
638 sc->sc_DMACtrl |= DMAC_MWIDisable;
639
640 /*
641 * Hookup IRQ
642 */
643 error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
644 NULL, stge_intr, sc, &sc->sc_ih);
645 if (error != 0) {
646 ether_ifdetach(ifp);
647 device_printf(sc->sc_dev, "couldn't set up IRQ\n");
648 sc->sc_ifp = NULL;
649 goto fail;
650 }
651
652fail:
653 if (error != 0)
654 stge_detach(dev);
655
656 return (error);
657}
658
659static int
660stge_detach(device_t dev)
661{
662 struct stge_softc *sc;
663 struct ifnet *ifp;
664
665 sc = device_get_softc(dev);
666
667 ifp = sc->sc_ifp;
668#ifdef DEVICE_POLLING
669 if (ifp && ifp->if_capenable & IFCAP_POLLING)
670 ether_poll_deregister(ifp);
671#endif
672 if (device_is_attached(dev)) {
673 STGE_LOCK(sc);
674 /* XXX */
675 sc->sc_detach = 1;
676 stge_stop(sc);
677 STGE_UNLOCK(sc);
678 callout_drain(&sc->sc_tick_ch);
679 taskqueue_drain(taskqueue_swi, &sc->sc_link_task);
680 ether_ifdetach(ifp);
681 }
682
683 if (sc->sc_miibus != NULL) {
684 device_delete_child(dev, sc->sc_miibus);
685 sc->sc_miibus = NULL;
686 }
687 bus_generic_detach(dev);
688 stge_dma_free(sc);
689
690 if (ifp != NULL) {
691 if_free(ifp);
692 sc->sc_ifp = NULL;
693 }
694
695 if (sc->sc_ih) {
696 bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih);
697 sc->sc_ih = NULL;
698 }
699 bus_release_resources(dev, sc->sc_spec, sc->sc_res);
700
701 mtx_destroy(&sc->sc_mii_mtx);
702 mtx_destroy(&sc->sc_mtx);
703
704 return (0);
705}
706
707struct stge_dmamap_arg {
708 bus_addr_t stge_busaddr;
709};
710
711static void
712stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
713{
714 struct stge_dmamap_arg *ctx;
715
716 if (error != 0)
717 return;
718
719 ctx = (struct stge_dmamap_arg *)arg;
720 ctx->stge_busaddr = segs[0].ds_addr;
721}
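
/*
 * stge_dmamap_cb() is the usual single-segment bus_dmamap_load(9)
 * callback idiom: with BUS_DMA_NOWAIT the load completes (or fails)
 * synchronously, and on success the callback records the lone segment's
 * bus address in the caller's stge_dmamap_arg:
 *
 *	ctx.stge_busaddr = 0;
 *	error = bus_dmamap_load(tag, map, vaddr, size,
 *	    stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 *	if (error != 0 || ctx.stge_busaddr == 0)
 *		... the load failed ...
 */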
722
723static int
724stge_dma_alloc(struct stge_softc *sc)
725{
726 struct stge_dmamap_arg ctx;
727 struct stge_txdesc *txd;
728 struct stge_rxdesc *rxd;
729 int error, i;
730
731 /* create parent tag. */
732 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */
733 1, 0, /* algnmnt, boundary */
734 STGE_DMA_MAXADDR, /* lowaddr */
735 BUS_SPACE_MAXADDR, /* highaddr */
736 NULL, NULL, /* filter, filterarg */
737 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
738 0, /* nsegments */
739 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
740 0, /* flags */
741 NULL, NULL, /* lockfunc, lockarg */
742 &sc->sc_cdata.stge_parent_tag);
743 if (error != 0) {
744 device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
745 goto fail;
746 }
747 /* create tag for Tx ring. */
748 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
749 STGE_RING_ALIGN, 0, /* algnmnt, boundary */
750 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
751 BUS_SPACE_MAXADDR, /* highaddr */
752 NULL, NULL, /* filter, filterarg */
753 STGE_TX_RING_SZ, /* maxsize */
754 1, /* nsegments */
755 STGE_TX_RING_SZ, /* maxsegsize */
756 0, /* flags */
757 NULL, NULL, /* lockfunc, lockarg */
758 &sc->sc_cdata.stge_tx_ring_tag);
759 if (error != 0) {
760 device_printf(sc->sc_dev,
761 "failed to allocate Tx ring DMA tag\n");
762 goto fail;
763 }
764
765 /* create tag for Rx ring. */
766 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
767 STGE_RING_ALIGN, 0, /* algnmnt, boundary */
768 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
769 BUS_SPACE_MAXADDR, /* highaddr */
770 NULL, NULL, /* filter, filterarg */
771 STGE_RX_RING_SZ, /* maxsize */
772 1, /* nsegments */
773 STGE_RX_RING_SZ, /* maxsegsize */
774 0, /* flags */
775 NULL, NULL, /* lockfunc, lockarg */
776 &sc->sc_cdata.stge_rx_ring_tag);
777 if (error != 0) {
778 device_printf(sc->sc_dev,
779 "failed to allocate Rx ring DMA tag\n");
780 goto fail;
781 }
782
783 /* create tag for Tx buffers. */
784 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
785 1, 0, /* algnmnt, boundary */
786 BUS_SPACE_MAXADDR, /* lowaddr */
787 BUS_SPACE_MAXADDR, /* highaddr */
788 NULL, NULL, /* filter, filterarg */
789 MCLBYTES * STGE_MAXTXSEGS, /* maxsize */
790 STGE_MAXTXSEGS, /* nsegments */
791 MCLBYTES, /* maxsegsize */
792 0, /* flags */
793 NULL, NULL, /* lockfunc, lockarg */
794 &sc->sc_cdata.stge_tx_tag);
795 if (error != 0) {
796 device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
797 goto fail;
798 }
799
800 /* create tag for Rx buffers. */
801 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
802 1, 0, /* algnmnt, boundary */
803 BUS_SPACE_MAXADDR, /* lowaddr */
804 BUS_SPACE_MAXADDR, /* highaddr */
805 NULL, NULL, /* filter, filterarg */
806 MCLBYTES, /* maxsize */
807 1, /* nsegments */
808 MCLBYTES, /* maxsegsize */
809 0, /* flags */
810 NULL, NULL, /* lockfunc, lockarg */
811 &sc->sc_cdata.stge_rx_tag);
812 if (error != 0) {
813 device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
814 goto fail;
815 }
816
817 /* allocate DMA'able memory and load the DMA map for Tx ring. */
818 error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
819 (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT |
820 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_tx_ring_map);
821 if (error != 0) {
822 device_printf(sc->sc_dev,
823 "failed to allocate DMA'able memory for Tx ring\n");
824 goto fail;
825 }
826
827 ctx.stge_busaddr = 0;
828 error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
829 sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
830 STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
831 if (error != 0 || ctx.stge_busaddr == 0) {
832 device_printf(sc->sc_dev,
833 "failed to load DMA'able memory for Tx ring\n");
834 goto fail;
835 }
836 sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;
837
838 /* allocate DMA'able memory and load the DMA map for Rx ring. */
839 error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
840 (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT |
841 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_rx_ring_map);
842 if (error != 0) {
843 device_printf(sc->sc_dev,
844 "failed to allocate DMA'able memory for Rx ring\n");
845 goto fail;
846 }
847
848 ctx.stge_busaddr = 0;
849 error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
850 sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
851 STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
852 if (error != 0 || ctx.stge_busaddr == 0) {
853 device_printf(sc->sc_dev,
854 "failed to load DMA'able memory for Rx ring\n");
855 goto fail;
856 }
857 sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;
858
859 /* create DMA maps for Tx buffers. */
860 for (i = 0; i < STGE_TX_RING_CNT; i++) {
861 txd = &sc->sc_cdata.stge_txdesc[i];
862 txd->tx_m = NULL;
863 txd->tx_dmamap = 0;
864 error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
865 &txd->tx_dmamap);
866 if (error != 0) {
867 device_printf(sc->sc_dev,
868 "failed to create Tx dmamap\n");
869 goto fail;
870 }
871 }
872 /* create DMA maps for Rx buffers. */
873 if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
874 &sc->sc_cdata.stge_rx_sparemap)) != 0) {
875 device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
876 goto fail;
877 }
878 for (i = 0; i < STGE_RX_RING_CNT; i++) {
879 rxd = &sc->sc_cdata.stge_rxdesc[i];
880 rxd->rx_m = NULL;
881 rxd->rx_dmamap = 0;
882 error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
883 &rxd->rx_dmamap);
884 if (error != 0) {
885 device_printf(sc->sc_dev,
886 "failed to create Rx dmamap\n");
887 goto fail;
888 }
889 }
890
891fail:
892 return (error);
893}
894
895static void
896stge_dma_free(struct stge_softc *sc)
897{
898 struct stge_txdesc *txd;
899 struct stge_rxdesc *rxd;
900 int i;
901
902 /* Tx ring */
903 if (sc->sc_cdata.stge_tx_ring_tag) {
904 if (sc->sc_cdata.stge_tx_ring_map)
905 bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
906 sc->sc_cdata.stge_tx_ring_map);
907 if (sc->sc_cdata.stge_tx_ring_map &&
908 sc->sc_rdata.stge_tx_ring)
909 bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
910 sc->sc_rdata.stge_tx_ring,
911 sc->sc_cdata.stge_tx_ring_map);
912 sc->sc_rdata.stge_tx_ring = NULL;
913 sc->sc_cdata.stge_tx_ring_map = 0;
914 bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
915 sc->sc_cdata.stge_tx_ring_tag = NULL;
916 }
917 /* Rx ring */
918 if (sc->sc_cdata.stge_rx_ring_tag) {
919 if (sc->sc_cdata.stge_rx_ring_map)
920 bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
921 sc->sc_cdata.stge_rx_ring_map);
922 if (sc->sc_cdata.stge_rx_ring_map &&
923 sc->sc_rdata.stge_rx_ring)
924 bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
925 sc->sc_rdata.stge_rx_ring,
926 sc->sc_cdata.stge_rx_ring_map);
927 sc->sc_rdata.stge_rx_ring = NULL;
928 sc->sc_cdata.stge_rx_ring_map = 0;
929 bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
930 sc->sc_cdata.stge_rx_ring_tag = NULL;
931 }
932 /* Tx buffers */
933 if (sc->sc_cdata.stge_tx_tag) {
934 for (i = 0; i < STGE_TX_RING_CNT; i++) {
935 txd = &sc->sc_cdata.stge_txdesc[i];
936 if (txd->tx_dmamap) {
937 bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
938 txd->tx_dmamap);
939 txd->tx_dmamap = 0;
940 }
941 }
942 bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
943 sc->sc_cdata.stge_tx_tag = NULL;
944 }
945 /* Rx buffers */
946 if (sc->sc_cdata.stge_rx_tag) {
947 for (i = 0; i < STGE_RX_RING_CNT; i++) {
948 rxd = &sc->sc_cdata.stge_rxdesc[i];
949 if (rxd->rx_dmamap) {
950 bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
951 rxd->rx_dmamap);
952 rxd->rx_dmamap = 0;
953 }
954 }
955 if (sc->sc_cdata.stge_rx_sparemap) {
956 bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
957 sc->sc_cdata.stge_rx_sparemap);
958 sc->sc_cdata.stge_rx_sparemap = 0;
959 }
960 bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
961 sc->sc_cdata.stge_rx_tag = NULL;
962 }
963
964 if (sc->sc_cdata.stge_parent_tag) {
965 bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
966 sc->sc_cdata.stge_parent_tag = NULL;
967 }
968}
969
970/*
971 * stge_shutdown:
972 *
973 * Make sure the interface is stopped at reboot time.
974 */
975static int
976stge_shutdown(device_t dev)
977{
978
979 return (stge_suspend(dev));
980}
981
982static void
983stge_setwol(struct stge_softc *sc)
984{
985 struct ifnet *ifp;
986 uint8_t v;
987
988 STGE_LOCK_ASSERT(sc);
989
990 ifp = sc->sc_ifp;
991 v = CSR_READ_1(sc, STGE_WakeEvent);
992 /* Disable all WOL bits. */
993 v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
994 WE_WakeOnLanEnable);
995 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
996 v |= WE_MagicPktEnable | WE_WakeOnLanEnable;
997 CSR_WRITE_1(sc, STGE_WakeEvent, v);
998 /* Reset Tx and prevent transmission. */
999 CSR_WRITE_4(sc, STGE_AsicCtrl,
1000 CSR_READ_4(sc, STGE_AsicCtrl) | AC_TxReset);
1001 /*
 1002	 * The TC9021 automatically resets its link speed to 100Mbps when
 1003	 * it is put to sleep, so there is no need to reset it here.
1004 */
1005}
1006
1007static int
1008stge_suspend(device_t dev)
1009{
1010 struct stge_softc *sc;
1011
1012 sc = device_get_softc(dev);
1013
1014 STGE_LOCK(sc);
1015 stge_stop(sc);
1016 sc->sc_suspended = 1;
1017 stge_setwol(sc);
1018 STGE_UNLOCK(sc);
1019
1020 return (0);
1021}
1022
1023static int
1024stge_resume(device_t dev)
1025{
1026 struct stge_softc *sc;
1027 struct ifnet *ifp;
1028 uint8_t v;
1029
1030 sc = device_get_softc(dev);
1031
1032 STGE_LOCK(sc);
1033 /*
 1034	 * Clear the WOL bits so that special frames no longer
 1035	 * interfere with normal Rx operation.
1036 */
1037 v = CSR_READ_1(sc, STGE_WakeEvent);
1038 v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
1039 WE_WakeOnLanEnable);
1040 CSR_WRITE_1(sc, STGE_WakeEvent, v);
1041 ifp = sc->sc_ifp;
1042 if (ifp->if_flags & IFF_UP)
1043 stge_init_locked(sc);
1044
1045 sc->sc_suspended = 0;
1046 STGE_UNLOCK(sc);
1047
1048 return (0);
1049}
1050
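/*
 * stge_dma_wait:
 *
 *	Wait, bounded by STGE_TIMEOUT, for the Tx DMA engine to go idle
 *	(DMAC_TxDMAInProg clear).
 */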
1051static void
1052stge_dma_wait(struct stge_softc *sc)
1053{
1054 int i;
1055
1056 for (i = 0; i < STGE_TIMEOUT; i++) {
1057 DELAY(2);
1058 if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
1059 break;
1060 }
1061
1062 if (i == STGE_TIMEOUT)
1063 device_printf(sc->sc_dev, "DMA wait timed out\n");
1064}
1065
1066static int
1067stge_encap(struct stge_softc *sc, struct mbuf **m_head)
1068{
1069 struct stge_txdesc *txd;
1070 struct stge_tfd *tfd;
1071 struct mbuf *m;
1072 bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
1073 int error, i, nsegs, si;
1074 uint64_t csum_flags, tfc;
1075
1076 STGE_LOCK_ASSERT(sc);
1077
1078 if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
1079 return (ENOBUFS);
1080
1081 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
1082 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1083 if (error == EFBIG) {
1084 m = m_collapse(*m_head, M_DONTWAIT, STGE_MAXTXSEGS);
1085 if (m == NULL) {
1086 m_freem(*m_head);
1087 *m_head = NULL;
1088 return (ENOMEM);
1089 }
1090 *m_head = m;
1091 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
1092 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1093 if (error != 0) {
1094 m_freem(*m_head);
1095 *m_head = NULL;
1096 return (error);
1097 }
1098 } else if (error != 0)
1099 return (error);
1100 if (nsegs == 0) {
1101 m_freem(*m_head);
1102 *m_head = NULL;
1103 return (EIO);
1104 }
1105
1106 m = *m_head;
1107 csum_flags = 0;
1108 if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
1109 if (m->m_pkthdr.csum_flags & CSUM_IP)
1110 csum_flags |= TFD_IPChecksumEnable;
1111 if (m->m_pkthdr.csum_flags & CSUM_TCP)
1112 csum_flags |= TFD_TCPChecksumEnable;
1113 else if (m->m_pkthdr.csum_flags & CSUM_UDP)
1114 csum_flags |= TFD_UDPChecksumEnable;
1115 }
1116
1117 si = sc->sc_cdata.stge_tx_prod;
1118 tfd = &sc->sc_rdata.stge_tx_ring[si];
1119 for (i = 0; i < nsegs; i++)
1120 tfd->tfd_frags[i].frag_word0 =
1121 htole64(FRAG_ADDR(txsegs[i].ds_addr) |
1122 FRAG_LEN(txsegs[i].ds_len));
1123 sc->sc_cdata.stge_tx_cnt++;
1124
1125 tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
1126 TFD_FragCount(nsegs) | csum_flags;
1127 if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
1128 tfc |= TFD_TxDMAIndicate;
1129
1130 /* Update producer index. */
1131 sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;
1132
1133 /* Check if we have a VLAN tag to insert. */
1134 if (m->m_flags & M_VLANTAG)
1135 tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag));
1136 tfd->tfd_control = htole64(tfc);
1137
1138 /* Update Tx Queue. */
1139 STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
1140 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
1141 txd->tx_m = m;
1142
1143 /* Sync descriptors. */
1144 bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1145 BUS_DMASYNC_PREWRITE);
1146 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1147 sc->sc_cdata.stge_tx_ring_map,
1148 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1149
1150 return (0);
1151}
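
/*
 * A note on the EFBIG path in stge_encap() above: when an mbuf chain
 * needs more than STGE_MAXTXSEGS DMA segments, m_collapse() copies it
 * into fewer clusters and the load is retried once; a second failure
 * drops the packet.  TFD_TxDMAIndicate (which asks the chip for a Tx
 * DMA completion indication) is only set once the ring occupancy
 * reaches STGE_TX_HIWAT, so completions are batched rather than taken
 * per frame.
 */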
1152
1153/*
1154 * stge_start: [ifnet interface function]
1155 *
1156 * Start packet transmission on the interface.
1157 */
1158static void
1159stge_start(struct ifnet *ifp)
1160{
1161 struct stge_softc *sc;
1162
1163 sc = ifp->if_softc;
1164 STGE_LOCK(sc);
1165 stge_start_locked(ifp);
1166 STGE_UNLOCK(sc);
1167}
1168
1169static void
1170stge_start_locked(struct ifnet *ifp)
1171{
1172 struct stge_softc *sc;
1173 struct mbuf *m_head;
1174 int enq;
1175
1176 sc = ifp->if_softc;
1177
1178 STGE_LOCK_ASSERT(sc);
1179
1180 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1181 IFF_DRV_RUNNING || sc->sc_link == 0)
1182 return;
1183
1184 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1185 if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
1186 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1187 break;
1188 }
1189
1190 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1191 if (m_head == NULL)
1192 break;
1193 /*
1194 * Pack the data into the transmit ring. If we
1195 * don't have room, set the OACTIVE flag and wait
1196 * for the NIC to drain the ring.
1197 */
1198 if (stge_encap(sc, &m_head)) {
1199 if (m_head == NULL)
1200 break;
1201 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1202 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1203 break;
1204 }
1205
1206 enq++;
1207 /*
1208 * If there's a BPF listener, bounce a copy of this frame
1209 * to him.
1210 */
1211 ETHER_BPF_MTAP(ifp, m_head);
1212 }
1213
1214 if (enq > 0) {
1215 /* Transmit */
1216 CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);
1217
1218 /* Set a timeout in case the chip goes out to lunch. */
1219 sc->sc_watchdog_timer = 5;
1220 }
1221}
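
/*
 * Note that the hardware is kicked only once per burst: descriptors for
 * all dequeued packets are built first, and a single DMAC_TxDMAPollNow
 * write then tells the chip to rescan the Tx ring.
 */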
1222
1223/*
1224 * stge_watchdog:
1225 *
1226 * Watchdog timer handler.
1227 */
1228static void
1229stge_watchdog(struct stge_softc *sc)
1230{
1231 struct ifnet *ifp;
1232
1233 STGE_LOCK_ASSERT(sc);
1234
1235 if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
1236 return;
1237
1238 ifp = sc->sc_ifp;
1239 if_printf(sc->sc_ifp, "device timeout\n");
1240 ifp->if_oerrors++;
1241 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1242 stge_init_locked(sc);
1243 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1244 stge_start_locked(ifp);
1245}
1246
1247/*
1248 * stge_ioctl: [ifnet interface function]
1249 *
1250 * Handle control requests from the operator.
1251 */
1252static int
1253stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1254{
1255 struct stge_softc *sc;
1256 struct ifreq *ifr;
1257 struct mii_data *mii;
1258 int error, mask;
1259
1260 sc = ifp->if_softc;
1261 ifr = (struct ifreq *)data;
1262 error = 0;
1263 switch (cmd) {
1264 case SIOCSIFMTU:
1265 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
1266 error = EINVAL;
1267 else if (ifp->if_mtu != ifr->ifr_mtu) {
1268 ifp->if_mtu = ifr->ifr_mtu;
1269 STGE_LOCK(sc);
1270 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1271 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1272 stge_init_locked(sc);
1273 }
1274 STGE_UNLOCK(sc);
1275 }
1276 break;
1277 case SIOCSIFFLAGS:
1278 STGE_LOCK(sc);
1279 if ((ifp->if_flags & IFF_UP) != 0) {
1280 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1281 if (((ifp->if_flags ^ sc->sc_if_flags)
1282 & IFF_PROMISC) != 0)
1283 stge_set_filter(sc);
1284 } else {
1285 if (sc->sc_detach == 0)
1286 stge_init_locked(sc);
1287 }
1288 } else {
1289 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1290 stge_stop(sc);
1291 }
1292 sc->sc_if_flags = ifp->if_flags;
1293 STGE_UNLOCK(sc);
1294 break;
1295 case SIOCADDMULTI:
1296 case SIOCDELMULTI:
1297 STGE_LOCK(sc);
1298 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1299 stge_set_multi(sc);
1300 STGE_UNLOCK(sc);
1301 break;
1302 case SIOCSIFMEDIA:
1303 case SIOCGIFMEDIA:
1304 mii = device_get_softc(sc->sc_miibus);
1305 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1306 break;
1307 case SIOCSIFCAP:
1308 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1309#ifdef DEVICE_POLLING
1310 if ((mask & IFCAP_POLLING) != 0) {
1311 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1312 error = ether_poll_register(stge_poll, ifp);
1313 if (error != 0)
1314 break;
1315 STGE_LOCK(sc);
1316 CSR_WRITE_2(sc, STGE_IntEnable, 0);
1317 ifp->if_capenable |= IFCAP_POLLING;
1318 STGE_UNLOCK(sc);
1319 } else {
1320 error = ether_poll_deregister(ifp);
1321 if (error != 0)
1322 break;
1323 STGE_LOCK(sc);
1324 CSR_WRITE_2(sc, STGE_IntEnable,
1325 sc->sc_IntEnable);
1326 ifp->if_capenable &= ~IFCAP_POLLING;
1327 STGE_UNLOCK(sc);
1328 }
1329 }
1330#endif
1331 if ((mask & IFCAP_HWCSUM) != 0) {
1332 ifp->if_capenable ^= IFCAP_HWCSUM;
1333 if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
1334 (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
1335 ifp->if_hwassist = STGE_CSUM_FEATURES;
1336 else
1337 ifp->if_hwassist = 0;
1338 }
1339 if ((mask & IFCAP_WOL) != 0 &&
1340 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1341 if ((mask & IFCAP_WOL_MAGIC) != 0)
1342 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1343 }
1344 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
1345 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1346 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1347 STGE_LOCK(sc);
1348 stge_vlan_setup(sc);
1349 STGE_UNLOCK(sc);
1350 }
1351 }
1352 VLAN_CAPABILITIES(ifp);
1353 break;
1354 default:
1355 error = ether_ioctl(ifp, cmd, data);
1356 break;
1357 }
1358
1359 return (error);
1360}
1361
1362static void
1363stge_link_task(void *arg, int pending)
1364{
1365 struct stge_softc *sc;
1366 struct mii_data *mii;
1367 uint32_t v, ac;
1368 int i;
1369
1370 sc = (struct stge_softc *)arg;
1371 STGE_LOCK(sc);
1372
1373 mii = device_get_softc(sc->sc_miibus);
1374 if (mii->mii_media_status & IFM_ACTIVE) {
1375 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
1376 sc->sc_link = 1;
1377 } else
1378 sc->sc_link = 0;
1379
1380 sc->sc_MACCtrl = 0;
1381 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
1382 sc->sc_MACCtrl |= MC_DuplexSelect;
1383 if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_RXPAUSE) != 0)
1384 sc->sc_MACCtrl |= MC_RxFlowControlEnable;
1385 if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_TXPAUSE) != 0)
1386 sc->sc_MACCtrl |= MC_TxFlowControlEnable;
1387 /*
1388 * Update STGE_MACCtrl register depending on link status.
1389 * (duplex, flow control etc)
1390 */
1391 v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
1392 v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
1393 v |= sc->sc_MACCtrl;
1394 CSR_WRITE_4(sc, STGE_MACCtrl, v);
1395 if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
1396 /* Duplex setting changed, reset Tx/Rx functions. */
1397 ac = CSR_READ_4(sc, STGE_AsicCtrl);
1398 ac |= AC_TxReset | AC_RxReset;
1399 CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1400 for (i = 0; i < STGE_TIMEOUT; i++) {
1401 DELAY(100);
1402 if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1403 break;
1404 }
1405 if (i == STGE_TIMEOUT)
1406 device_printf(sc->sc_dev, "reset failed to complete\n");
1407 }
1408 STGE_UNLOCK(sc);
1409}
1410
1411static __inline int
1412stge_tx_error(struct stge_softc *sc)
1413{
1414 uint32_t txstat;
1415 int error;
1416
1417 for (error = 0;;) {
1418 txstat = CSR_READ_4(sc, STGE_TxStatus);
1419 if ((txstat & TS_TxComplete) == 0)
1420 break;
1421 /* Tx underrun */
1422 if ((txstat & TS_TxUnderrun) != 0) {
1423 /*
1424 * XXX
 1425			 * There should be a better way to recover from a Tx
 1426			 * underrun than a full reset.
1427 */
1428 if (sc->sc_nerr++ < STGE_MAXERR)
1429 device_printf(sc->sc_dev, "Tx underrun, "
1430 "resetting...\n");
1431 if (sc->sc_nerr == STGE_MAXERR)
1432 device_printf(sc->sc_dev, "too many errors; "
1433 "not reporting any more\n");
1434 error = -1;
1435 break;
1436 }
1437 /* Maximum/Late collisions, Re-enable Tx MAC. */
1438 if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
1439 CSR_WRITE_4(sc, STGE_MACCtrl,
1440 (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
1441 MC_TxEnable);
1442 }
1443
1444 return (error);
1445}
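
/*
 * A non-zero return from stge_tx_error() means the Tx MAC could not be
 * recovered in place (Tx underrun); the callers, stge_intr() and
 * stge_poll(), respond by reinitializing the whole interface.
 */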
1446
1447/*
1448 * stge_intr:
1449 *
1450 * Interrupt service routine.
1451 */
1452static void
1453stge_intr(void *arg)
1454{
1455 struct stge_softc *sc;
1456 struct ifnet *ifp;
1457 int reinit;
1458 uint16_t status;
1459
1460 sc = (struct stge_softc *)arg;
1461 ifp = sc->sc_ifp;
1462
1463 STGE_LOCK(sc);
1464
1465#ifdef DEVICE_POLLING
1466 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1467 goto done_locked;
1468#endif
1469 status = CSR_READ_2(sc, STGE_IntStatus);
1470 if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
1471 goto done_locked;
1472
1473 /* Disable interrupts. */
1474 for (reinit = 0;;) {
1475 status = CSR_READ_2(sc, STGE_IntStatusAck);
1476 status &= sc->sc_IntEnable;
1477 if (status == 0)
1478 break;
1479 /* Host interface errors. */
1480 if ((status & IS_HostError) != 0) {
1481 device_printf(sc->sc_dev,
1482 "Host interface error, resetting...\n");
1483 reinit = 1;
1484 goto force_init;
1485 }
1486
1487 /* Receive interrupts. */
1488 if ((status & IS_RxDMAComplete) != 0) {
1489 stge_rxeof(sc);
1490 if ((status & IS_RFDListEnd) != 0)
1491 CSR_WRITE_4(sc, STGE_DMACtrl,
1492 DMAC_RxDMAPollNow);
1493 }
1494
1495 /* Transmit interrupts. */
1496 if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
1497 stge_txeof(sc);
1498
 1499		/* Transmission errors. */
1500 if ((status & IS_TxComplete) != 0) {
1501 if ((reinit = stge_tx_error(sc)) != 0)
1502 break;
1503 }
1504 }
1505
1506force_init:
1507 if (reinit != 0) {
1508 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1509 stge_init_locked(sc);
1510 }
1511
1512 /* Re-enable interrupts. */
1513 CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
1514
1515 /* Try to get more packets going. */
1516 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1517 stge_start_locked(ifp);
1518
1519done_locked:
1520 STGE_UNLOCK(sc);
1521}
1522
1523/*
1524 * stge_txeof:
1525 *
1526 * Helper; handle transmit interrupts.
1527 */
1528static void
1529stge_txeof(struct stge_softc *sc)
1530{
1531 struct ifnet *ifp;
1532 struct stge_txdesc *txd;
1533 uint64_t control;
1534 int cons;
1535
1536 STGE_LOCK_ASSERT(sc);
1537
1538 ifp = sc->sc_ifp;
1539
1540 txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1541 if (txd == NULL)
1542 return;
1543 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1544 sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);
1545
1546 /*
1547 * Go through our Tx list and free mbufs for those
1548 * frames which have been transmitted.
1549 */
1550 for (cons = sc->sc_cdata.stge_tx_cons;;
1551 cons = (cons + 1) % STGE_TX_RING_CNT) {
1552 if (sc->sc_cdata.stge_tx_cnt <= 0)
1553 break;
1554 control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
1555 if ((control & TFD_TFDDone) == 0)
1556 break;
1557 sc->sc_cdata.stge_tx_cnt--;
1558 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1559
1560 bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1561 BUS_DMASYNC_POSTWRITE);
1562 bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);
1563
 1564		/* The output packet counter is updated via the statistics registers. */
1565 m_freem(txd->tx_m);
1566 txd->tx_m = NULL;
1567 STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
1568 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
1569 txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1570 }
1571 sc->sc_cdata.stge_tx_cons = cons;
1572 if (sc->sc_cdata.stge_tx_cnt == 0)
1573 sc->sc_watchdog_timer = 0;
1574
1575 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1576 sc->sc_cdata.stge_tx_ring_map,
1577 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1578}
1579
1580static __inline void
1581stge_discard_rxbuf(struct stge_softc *sc, int idx)
1582{
1583 struct stge_rfd *rfd;
1584
1585 rfd = &sc->sc_rdata.stge_rx_ring[idx];
1586 rfd->rfd_status = 0;
1587}
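
/*
 * Clearing rfd_status clears RFD_RFDDone, handing the descriptor back
 * to the chip with its fragment pointer intact, so the already-loaded
 * cluster is simply reused for the next incoming frame.
 */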
1588
1589#ifndef __NO_STRICT_ALIGNMENT
1590/*
1591 * It seems that TC9021's DMA engine has alignment restrictions in
1592 * DMA scatter operations. The first DMA segment has no address
1593 * alignment restrictions but the rest should be aligned on a 4(?)-byte
1594 * boundary; otherwise it would corrupt random memory. Since we don't
1595 * know in advance which segment comes first, we simply don't align
1596 * at all.
1597 * To avoid copying an entire frame just to align it, we allocate a new
1598 * mbuf, copy the Ethernet header into it, and prepend the new mbuf to
1599 * the existing chain.
1600 */
1601static __inline struct mbuf *
1602stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
1603{
1604 struct mbuf *n;
1605
1606 n = NULL;
1607 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
1608 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
1609 m->m_data += ETHER_HDR_LEN;
1610 n = m;
1611 } else {
1612 MGETHDR(n, M_DONTWAIT, MT_DATA);
1613 if (n != NULL) {
1614 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
1615 m->m_data += ETHER_HDR_LEN;
1616 m->m_len -= ETHER_HDR_LEN;
1617 n->m_len = ETHER_HDR_LEN;
1618 M_MOVE_PKTHDR(n, m);
1619 n->m_next = m;
1620 } else
1621 m_freem(m);
1622 }
1623
1624 return (n);
1625}
1626#endif
1627
1628/*
1629 * stge_rxeof:
1630 *
1631 * Helper; handle receive interrupts.
1632 */
1633static int
1634stge_rxeof(struct stge_softc *sc)
1635{
1636 struct ifnet *ifp;
1637 struct stge_rxdesc *rxd;
1638 struct mbuf *mp, *m;
1639 uint64_t status64;
1640 uint32_t status;
1641 int cons, prog, rx_npkts;
1642
1643 STGE_LOCK_ASSERT(sc);
1644
1645 rx_npkts = 0;
1646 ifp = sc->sc_ifp;
1647
1648 bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1649 sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);
1650
1651 prog = 0;
1652 for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
1653 prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
1654 status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
1655 status = RFD_RxStatus(status64);
1656 if ((status & RFD_RFDDone) == 0)
1657 break;
1658#ifdef DEVICE_POLLING
1659 if (ifp->if_capenable & IFCAP_POLLING) {
1660 if (sc->sc_cdata.stge_rxcycles <= 0)
1661 break;
1662 sc->sc_cdata.stge_rxcycles--;
1663 }
1664#endif
1665 prog++;
1666 rxd = &sc->sc_cdata.stge_rxdesc[cons];
1667 mp = rxd->rx_m;
1668
1669 /*
1670 * If the packet had an error, drop it. Note we count
1671 * the error later in the periodic stats update.
1672 */
1673 if ((status & RFD_FrameEnd) != 0 && (status &
1674 (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
1675 RFD_RxAlignmentError | RFD_RxFCSError |
1676 RFD_RxLengthError)) != 0) {
1677 stge_discard_rxbuf(sc, cons);
1678 if (sc->sc_cdata.stge_rxhead != NULL) {
1679 m_freem(sc->sc_cdata.stge_rxhead);
1680 STGE_RXCHAIN_RESET(sc);
1681 }
1682 continue;
1683 }
1684 /*
1685 * Add a new receive buffer to the ring.
1686 */
1687 if (stge_newbuf(sc, cons) != 0) {
1688 ifp->if_iqdrops++;
1689 stge_discard_rxbuf(sc, cons);
1690 if (sc->sc_cdata.stge_rxhead != NULL) {
1691 m_freem(sc->sc_cdata.stge_rxhead);
1692 STGE_RXCHAIN_RESET(sc);
1693 }
1694 continue;
1695 }
1696
1697 if ((status & RFD_FrameEnd) != 0)
1698 mp->m_len = RFD_RxDMAFrameLen(status) -
1699 sc->sc_cdata.stge_rxlen;
1700 sc->sc_cdata.stge_rxlen += mp->m_len;
1701
1702 /* Chain mbufs. */
1703 if (sc->sc_cdata.stge_rxhead == NULL) {
1704 sc->sc_cdata.stge_rxhead = mp;
1705 sc->sc_cdata.stge_rxtail = mp;
1706 } else {
1707 mp->m_flags &= ~M_PKTHDR;
1708 sc->sc_cdata.stge_rxtail->m_next = mp;
1709 sc->sc_cdata.stge_rxtail = mp;
1710 }
1711
1712 if ((status & RFD_FrameEnd) != 0) {
1713 m = sc->sc_cdata.stge_rxhead;
1714 m->m_pkthdr.rcvif = ifp;
1715 m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;
1716
1717 if (m->m_pkthdr.len > sc->sc_if_framesize) {
1718 m_freem(m);
1719 STGE_RXCHAIN_RESET(sc);
1720 continue;
1721 }
1722 /*
1723 * Set the incoming checksum information for
1724 * the packet.
1725 */
1726 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1727 if ((status & RFD_IPDetected) != 0) {
1728 m->m_pkthdr.csum_flags |=
1729 CSUM_IP_CHECKED;
1730 if ((status & RFD_IPError) == 0)
1731 m->m_pkthdr.csum_flags |=
1732 CSUM_IP_VALID;
1733 }
1734 if (((status & RFD_TCPDetected) != 0 &&
1735 (status & RFD_TCPError) == 0) ||
1736 ((status & RFD_UDPDetected) != 0 &&
1737 (status & RFD_UDPError) == 0)) {
1738 m->m_pkthdr.csum_flags |=
1739 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1740 m->m_pkthdr.csum_data = 0xffff;
1741 }
1742 }
1743
1744#ifndef __NO_STRICT_ALIGNMENT
1745 if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
1746 if ((m = stge_fixup_rx(sc, m)) == NULL) {
1747 STGE_RXCHAIN_RESET(sc);
1748 continue;
1749 }
1750 }
1751#endif
1752 /* Check for VLAN tagged packets. */
1753 if ((status & RFD_VLANDetected) != 0 &&
1754 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
1755 m->m_pkthdr.ether_vtag = RFD_TCI(status64);
1756 m->m_flags |= M_VLANTAG;
1757 }
1758
1759 STGE_UNLOCK(sc);
1760 /* Pass it on. */
1761 (*ifp->if_input)(ifp, m);
1762 STGE_LOCK(sc);
1763 rx_npkts++;
1764
1765 STGE_RXCHAIN_RESET(sc);
1766 }
1767 }
1768
1769 if (prog > 0) {
1770 /* Update the consumer index. */
1771 sc->sc_cdata.stge_rx_cons = cons;
1772 bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1773 sc->sc_cdata.stge_rx_ring_map,
1774 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1775 }
1776 return (rx_npkts);
1777}
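
/*
 * A frame larger than a single cluster spans several RFDs: the pieces
 * are chained on stge_rxhead/stge_rxtail above, and the completed
 * packet is only handed to if_input() once an RFD with RFD_FrameEnd
 * set arrives.
 */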
1778
1779#ifdef DEVICE_POLLING
1780static int
1781stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1782{
1783 struct stge_softc *sc;
1784 uint16_t status;
1785 int rx_npkts;
1786
1787 rx_npkts = 0;
1788 sc = ifp->if_softc;
1789 STGE_LOCK(sc);
1790 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1791 STGE_UNLOCK(sc);
1792 return (rx_npkts);
1793 }
1794
1795 sc->sc_cdata.stge_rxcycles = count;
1796 rx_npkts = stge_rxeof(sc);
1797 stge_txeof(sc);
1798
1799 if (cmd == POLL_AND_CHECK_STATUS) {
1800 status = CSR_READ_2(sc, STGE_IntStatus);
1801 status &= sc->sc_IntEnable;
1802 if (status != 0) {
1803 if ((status & IS_HostError) != 0) {
1804 device_printf(sc->sc_dev,
1805 "Host interface error, resetting...\n");
1806 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1807 stge_init_locked(sc);
1808 }
1809 if ((status & IS_TxComplete) != 0) {
1810 if (stge_tx_error(sc) != 0) {
1811 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1812 stge_init_locked(sc);
1813 }
1814 }
1815 }
1816
1817 }
1818
1819 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1820 stge_start_locked(ifp);
1821
1822 STGE_UNLOCK(sc);
1823 return (rx_npkts);
1824}
1825#endif /* DEVICE_POLLING */
1826
1827/*
1828 * stge_tick:
1829 *
1830 * One second timer, used to tick the MII.
1831 */
1832static void
1833stge_tick(void *arg)
1834{
1835 struct stge_softc *sc;
1836 struct mii_data *mii;
1837
1838 sc = (struct stge_softc *)arg;
1839
1840 STGE_LOCK_ASSERT(sc);
1841
1842 mii = device_get_softc(sc->sc_miibus);
1843 mii_tick(mii);
1844
1845 /* Update statistics counters. */
1846 stge_stats_update(sc);
1847
1848 /*
 1849	 * Reclaim any pending Tx descriptors to release mbufs in a
1850 * timely manner as we don't generate Tx completion interrupts
1851 * for every frame. This limits the delay to a maximum of one
1852 * second.
1853 */
1854 if (sc->sc_cdata.stge_tx_cnt != 0)
1855 stge_txeof(sc);
1856
1857 stge_watchdog(sc);
1858
1859 callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
1860}
1861
1862/*
1863 * stge_stats_update:
1864 *
1865 * Read the TC9021 statistics counters.
1866 */
1867static void
1868stge_stats_update(struct stge_softc *sc)
1869{
1870 struct ifnet *ifp;
1871
1872 STGE_LOCK_ASSERT(sc);
1873
1874 ifp = sc->sc_ifp;
1875
 1876	CSR_READ_4(sc, STGE_OctetRcvOk);
1877
1878 ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);
1879
1880 ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);
1881
1882 CSR_READ_4(sc, STGE_OctetXmtdOk);
1883
1884 ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);
1885
1886 ifp->if_collisions +=
1887 CSR_READ_4(sc, STGE_LateCollisions) +
1888 CSR_READ_4(sc, STGE_MultiColFrames) +
1889 CSR_READ_4(sc, STGE_SingleColFrames);
1890
1891 ifp->if_oerrors +=
1892 CSR_READ_2(sc, STGE_FramesAbortXSColls) +
1893 CSR_READ_2(sc, STGE_FramesWEXDeferal);
1894}
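
/*
 * The bare CSR_READ_4() calls above are not dead code: the TC9021
 * statistics counters appear to clear on read, so counters the driver
 * does not fold into the ifnet statistics (e.g. the octet counts) are
 * still read, and discarded, to keep them from saturating.
 */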
1895
1896/*
1897 * stge_reset:
1898 *
1899 * Perform a soft reset on the TC9021.
1900 */
1901static void
1902stge_reset(struct stge_softc *sc, uint32_t how)
1903{
1904 uint32_t ac;
1905 uint8_t v;
1906 int i, dv;
1907
1908 STGE_LOCK_ASSERT(sc);
1909
1910 dv = 5000;
1911 ac = CSR_READ_4(sc, STGE_AsicCtrl);
1912 switch (how) {
1913 case STGE_RESET_TX:
1914 ac |= AC_TxReset | AC_FIFO;
1915 dv = 100;
1916 break;
1917 case STGE_RESET_RX:
1918 ac |= AC_RxReset | AC_FIFO;
1919 dv = 100;
1920 break;
1921 case STGE_RESET_FULL:
1922 default:
1923 /*
1924 * Only assert RstOut if we're fiber. We need GMII clocks
1925 * to be present in order for the reset to complete on fiber
1926 * cards.
1927 */
1928 ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
1929 AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
1930 (sc->sc_usefiber ? AC_RstOut : 0);
1931 break;
1932 }
1933
1934 CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1935
1936 /* Account for reset problem at 10Mbps. */
1937 DELAY(dv);
1938
1939 for (i = 0; i < STGE_TIMEOUT; i++) {
1940 if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1941 break;
1942 DELAY(dv);
1943 }
1944
1945 if (i == STGE_TIMEOUT)
1946 device_printf(sc->sc_dev, "reset failed to complete\n");
1947
1948 /* Set LED, from Linux IPG driver. */
1949 ac = CSR_READ_4(sc, STGE_AsicCtrl);
1950 ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
1951 if ((sc->sc_led & 0x01) != 0)
1952 ac |= AC_LEDMode;
1953 if ((sc->sc_led & 0x03) != 0)
1954 ac |= AC_LEDModeBit1;
1955 if ((sc->sc_led & 0x08) != 0)
1956 ac |= AC_LEDSpeed;
1957 CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1958
1959	/* Set PHY, from Linux IPG driver. */
1960 v = CSR_READ_1(sc, STGE_PhySet);
1961 v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
1962 v |= ((sc->sc_led & 0x70) >> 4);
1963 CSR_WRITE_1(sc, STGE_PhySet, v);
1964}
1965
1966/*
1967 * stge_init: [ ifnet interface function ]
1968 *
1969 * Initialize the interface.
1970 */
1971static void
1972stge_init(void *xsc)
1973{
1974 struct stge_softc *sc;
1975
1976 sc = (struct stge_softc *)xsc;
1977 STGE_LOCK(sc);
1978 stge_init_locked(sc);
1979 STGE_UNLOCK(sc);
1980}
1981
1982static void
1983stge_init_locked(struct stge_softc *sc)
1984{
1985 struct ifnet *ifp;
1986 struct mii_data *mii;
1987 uint16_t eaddr[3];
1988 uint32_t v;
1989 int error;
1990
1991 STGE_LOCK_ASSERT(sc);
1992
1993 ifp = sc->sc_ifp;
1994 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1995 return;
1996 mii = device_get_softc(sc->sc_miibus);
1997
1998 /*
1999 * Cancel any pending I/O.
2000 */
2001 stge_stop(sc);
2002
2003 /*
2004 * Reset the chip to a known state.
2005 */
2006 stge_reset(sc, STGE_RESET_FULL);
2007
2008 /* Init descriptors. */
2009 error = stge_init_rx_ring(sc);
2010 if (error != 0) {
2011 device_printf(sc->sc_dev,
2012 "initialization failed: no memory for rx buffers\n");
2013 stge_stop(sc);
2014 goto out;
2015 }
2016 stge_init_tx_ring(sc);
2017
2018 /* Set the station address. */
2019 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2020 CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
2021 CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
2022 CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));
2023
2024 /*
2025 * Set the statistics masks. Disable all the RMON stats,
2026 * and disable selected stats in the non-RMON stats registers.
2027 */
2028 CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
2029 CSR_WRITE_4(sc, STGE_StatisticsMask,
2030 (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
2031 (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
2032 (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
2033 (1U << 21));
2034
2035 /* Set up the receive filter. */
2036 stge_set_filter(sc);
2037 /* Program multicast filter. */
2038 stge_set_multi(sc);
2039
2040 /*
2041 * Give the transmit and receive ring to the chip.
2042 */
2043 CSR_WRITE_4(sc, STGE_TFDListPtrHi,
2044 STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
2045 CSR_WRITE_4(sc, STGE_TFDListPtrLo,
2046 STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));
2047
2048 CSR_WRITE_4(sc, STGE_RFDListPtrHi,
2049 STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
2050 CSR_WRITE_4(sc, STGE_RFDListPtrLo,
2051 STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));
2052
2053 /*
2054 * Initialize the Tx auto-poll period. It's OK to make this number
2055 * large (255 is the max, but we use 127) -- we explicitly kick the
2056 * transmit engine when there's actually a packet.
2057 */
2058 CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2059
2060 /* ..and the Rx auto-poll period. */
2061 CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2062
2063 /* Initialize the Tx start threshold. */
2064 CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
2065
2066 /* Rx DMA thresholds, from Linux */
2067 CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
2068 CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);
2069
2070	/* Rx early threshold, from Linux */
2071 CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);
2072
2073 /* Tx DMA thresholds, from Linux */
2074 CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
2075 CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);
2076
2077 /*
2078 * Initialize the Rx DMA interrupt control register. We
2079 * request an interrupt after every incoming packet, but
2080 * defer it for sc_rxint_dmawait us. When the number of
2081	 * interrupts pending reaches sc_rxint_nframe, we stop
2082 * deferring the interrupt, and signal it immediately.
2083 */
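	/*
	 * For example (hypothetical values), with sc_rxint_nframe = 8 and
	 * sc_rxint_dmawait = 30, an interrupt is signaled within 30 us of
	 * a received frame, or immediately once 8 frames are pending.
	 */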
2084 CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
2085 RDIC_RxFrameCount(sc->sc_rxint_nframe) |
2086 RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
2087
2088 /*
2089 * Initialize the interrupt mask.
2090 */
2091 sc->sc_IntEnable = IS_HostError | IS_TxComplete |
2092 IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
2093#ifdef DEVICE_POLLING
2094 /* Disable interrupts if we are polling. */
2095 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2096 CSR_WRITE_2(sc, STGE_IntEnable, 0);
2097 else
2098#endif
2099 CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
2100
2101 /*
2102 * Configure the DMA engine.
2103 * XXX Should auto-tune TxBurstLimit.
2104 */
2105 CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));
2106
2107 /*
2108 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
2109 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
2110 * in the Rx FIFO.
2111 */
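	/* The threshold registers are programmed in units of 16 bytes. */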
2112 CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
2113 CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
2114
2115 /*
2116 * Set the maximum frame size.
2117 */
2118 sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2119 CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);
2120
2121 /*
2122 * Initialize MacCtrl -- do it before setting the media,
2123 * as setting the media will actually program the register.
2124 *
2125 * Note: We have to poke the IFS value before poking
2126 * anything else.
2127 */
2128	/* Tx/Rx MAC should be disabled before programming IFS. */
2129 CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));
2130
2131 stge_vlan_setup(sc);
2132
2133 if (sc->sc_rev >= 6) { /* >= B.2 */
2134 /* Multi-frag frame bug work-around. */
2135 CSR_WRITE_2(sc, STGE_DebugCtrl,
2136 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);
2137
2138 /* Tx Poll Now bug work-around. */
2139 CSR_WRITE_2(sc, STGE_DebugCtrl,
2140 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
2141 /* Tx Poll Now bug work-around. */
2142 CSR_WRITE_2(sc, STGE_DebugCtrl,
2143 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
2144 }
2145
2146 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2147 v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
2148 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2149 /*
2150 * It seems that transmitting frames without checking the state of
2151	 * Rx/Tx MAC wedges the hardware.
2152 */
2153 stge_start_tx(sc);
2154 stge_start_rx(sc);
2155
2156 sc->sc_link = 0;
2157 /*
2158 * Set the current media.
2159 */
2160 mii_mediachg(mii);
2161
2162 /*
2163 * Start the one second MII clock.
2164 */
2165 callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
2166
2167 /*
2168 * ...all done!
2169 */
2170 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2171 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2172
2173 out:
2174 if (error != 0)
2175 device_printf(sc->sc_dev, "interface not running\n");
2176}
2177
2178static void
2179stge_vlan_setup(struct stge_softc *sc)
2180{
2181 struct ifnet *ifp;
2182 uint32_t v;
2183
2184 ifp = sc->sc_ifp;
2185 /*
2186	 * The NIC always copies a VLAN tag regardless of the STGE_MACCtrl
2187	 * MC_AutoVLANuntagging bit.
2188	 * The MC_AutoVLANtagging bit selects which VLAN source to use,
2189	 * STGE_VLANTag or the TFC; however, the TFC TFD_VLANTagInsert
2190	 * bit has priority over MC_AutoVLANtagging, so we always use
2191	 * the TFC instead of the STGE_VLANTag register.
2192 */
2193 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2194 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2195 v |= MC_AutoVLANuntagging;
2196 else
2197 v &= ~MC_AutoVLANuntagging;
2198 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2199}
2200
2201/*
2202 * Stop the interface: halt the transmitter and receiver.
2203 */
2204static void
2205stge_stop(struct stge_softc *sc)
2206{
2207 struct ifnet *ifp;
2208 struct stge_txdesc *txd;
2209 struct stge_rxdesc *rxd;
2210 uint32_t v;
2211 int i;
2212
2213 STGE_LOCK_ASSERT(sc);
2214 /*
2215 * Stop the one second clock.
2216 */
2217 callout_stop(&sc->sc_tick_ch);
2218 sc->sc_watchdog_timer = 0;
2219
2220 /*
2221 * Disable interrupts.
2222 */
2223 CSR_WRITE_2(sc, STGE_IntEnable, 0);
2224
2225 /*
2226 * Stop receiver, transmitter, and stats update.
2227 */
2228 stge_stop_rx(sc);
2229 stge_stop_tx(sc);
2230 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2231 v |= MC_StatisticsDisable;
2232 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2233
2234 /*
2235 * Stop the transmit and receive DMA.
2236 */
2237 stge_dma_wait(sc);
2238 CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
2239 CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
2240 CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
2241 CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
2242
2243 /*
2244 * Free RX and TX mbufs still in the queues.
2245 */
2246 for (i = 0; i < STGE_RX_RING_CNT; i++) {
2247 rxd = &sc->sc_cdata.stge_rxdesc[i];
2248 if (rxd->rx_m != NULL) {
2249 bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
2250 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2251 bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
2252 rxd->rx_dmamap);
2253 m_freem(rxd->rx_m);
2254 rxd->rx_m = NULL;
2255 }
2256 }
2257 for (i = 0; i < STGE_TX_RING_CNT; i++) {
2258 txd = &sc->sc_cdata.stge_txdesc[i];
2259 if (txd->tx_m != NULL) {
2260 bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
2261 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2262 bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
2263 txd->tx_dmamap);
2264 m_freem(txd->tx_m);
2265 txd->tx_m = NULL;
2266 }
2267 }
2268
2269 /*
2270 * Mark the interface down and cancel the watchdog timer.
2271 */
2272 ifp = sc->sc_ifp;
2273 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2274 sc->sc_link = 0;
2275}
2276
2277static void
2278stge_start_tx(struct stge_softc *sc)
2279{
2280 uint32_t v;
2281 int i;
2282
2283 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2284 if ((v & MC_TxEnabled) != 0)
2285 return;
2286 v |= MC_TxEnable;
2287 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2288 CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2289 for (i = STGE_TIMEOUT; i > 0; i--) {
2290 DELAY(10);
2291 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2292 if ((v & MC_TxEnabled) != 0)
2293 break;
2294 }
2295 if (i == 0)
2296 device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
2297}
2298
2299static void
2300stge_start_rx(struct stge_softc *sc)
2301{
2302 uint32_t v;
2303 int i;
2304
2305 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2306 if ((v & MC_RxEnabled) != 0)
2307 return;
2308 v |= MC_RxEnable;
2309 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2310 CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2311 for (i = STGE_TIMEOUT; i > 0; i--) {
2312 DELAY(10);
2313 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2314 if ((v & MC_RxEnabled) != 0)
2315 break;
2316 }
2317 if (i == 0)
2318 device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
2319}
2320
2321static void
2322stge_stop_tx(struct stge_softc *sc)
2323{
2324 uint32_t v;
2325 int i;
2326
2327 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2328 if ((v & MC_TxEnabled) == 0)
2329 return;
2330 v |= MC_TxDisable;
2331 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2332 for (i = STGE_TIMEOUT; i > 0; i--) {
2333 DELAY(10);
2334 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2335 if ((v & MC_TxEnabled) == 0)
2336 break;
2337 }
2338 if (i == 0)
2339 device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
2340}
2341
2342static void
2343stge_stop_rx(struct stge_softc *sc)
2344{
2345 uint32_t v;
2346 int i;
2347
2348 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2349 if ((v & MC_RxEnabled) == 0)
2350 return;
2351 v |= MC_RxDisable;
2352 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2353 for (i = STGE_TIMEOUT; i > 0; i--) {
2354 DELAY(10);
2355 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2356 if ((v & MC_RxEnabled) == 0)
2357 break;
2358 }
2359 if (i == 0)
2360 device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
2361}
2362
2363static void
2364stge_init_tx_ring(struct stge_softc *sc)
2365{
2366 struct stge_ring_data *rd;
2367 struct stge_txdesc *txd;
2368 bus_addr_t addr;
2369 int i;
2370
2371 STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
2372 STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
2373
2374 sc->sc_cdata.stge_tx_prod = 0;
2375 sc->sc_cdata.stge_tx_cons = 0;
2376 sc->sc_cdata.stge_tx_cnt = 0;
2377
2378 rd = &sc->sc_rdata;
2379 bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
2380 for (i = 0; i < STGE_TX_RING_CNT; i++) {
2381 if (i == (STGE_TX_RING_CNT - 1))
2382 addr = STGE_TX_RING_ADDR(sc, 0);
2383 else
2384 addr = STGE_TX_RING_ADDR(sc, i + 1);
2385 rd->stge_tx_ring[i].tfd_next = htole64(addr);
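		/*
		 * Presumably pre-marked done so the Tx DMA engine sees
		 * no pending work in a freshly initialized ring.
		 */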
2386 rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
2387 txd = &sc->sc_cdata.stge_txdesc[i];
2388 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
2389 }
2390
2391 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
2392 sc->sc_cdata.stge_tx_ring_map,
2393 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2394
2395}
2396
2397static int
2398stge_init_rx_ring(struct stge_softc *sc)
2399{
2400 struct stge_ring_data *rd;
2401 bus_addr_t addr;
2402 int i;
2403
2404 sc->sc_cdata.stge_rx_cons = 0;
2405 STGE_RXCHAIN_RESET(sc);
2406
2407 rd = &sc->sc_rdata;
2408 bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
2409 for (i = 0; i < STGE_RX_RING_CNT; i++) {
2410 if (stge_newbuf(sc, i) != 0)
2411 return (ENOBUFS);
2412 if (i == (STGE_RX_RING_CNT - 1))
2413 addr = STGE_RX_RING_ADDR(sc, 0);
2414 else
2415 addr = STGE_RX_RING_ADDR(sc, i + 1);
2416 rd->stge_rx_ring[i].rfd_next = htole64(addr);
2417 rd->stge_rx_ring[i].rfd_status = 0;
2418 }
2419
2420 bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
2421 sc->sc_cdata.stge_rx_ring_map,
2422 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2423
2424 return (0);
2425}
2426
2427/*
2428 * stge_newbuf:
2429 *
2430 * Add a receive buffer to the indicated descriptor.
2431 */
2432static int
2433stge_newbuf(struct stge_softc *sc, int idx)
2434{
2435 struct stge_rxdesc *rxd;
2436 struct stge_rfd *rfd;
2437 struct mbuf *m;
2438 bus_dma_segment_t segs[1];
2439 bus_dmamap_t map;
2440 int nsegs;
2441
2442 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2443 if (m == NULL)
2444 return (ENOBUFS);
2445 m->m_len = m->m_pkthdr.len = MCLBYTES;
2446 /*
2447	 * The hardware requires a 4-byte aligned DMA address when a jumbo
2448	 * frame is used; alignment can only be fixed up for standard frames.
2449 */
2450 if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
2451 m_adj(m, ETHER_ALIGN);
2452
2453 if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
2454 sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2455 m_freem(m);
2456 return (ENOBUFS);
2457 }
2458 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2459
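	/*
	 * The new buffer was loaded into the spare map above, so a
	 * failed load never disturbs the mbuf already posted in this
	 * slot; now swap the spare map with the slot's own map.
	 */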
2460 rxd = &sc->sc_cdata.stge_rxdesc[idx];
2461 if (rxd->rx_m != NULL) {
2462 bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2463 BUS_DMASYNC_POSTREAD);
2464 bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
2465 }
2466 map = rxd->rx_dmamap;
2467 rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
2468 sc->sc_cdata.stge_rx_sparemap = map;
2469 bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2470 BUS_DMASYNC_PREREAD);
2471 rxd->rx_m = m;
2472
2473 rfd = &sc->sc_rdata.stge_rx_ring[idx];
2474 rfd->rfd_frag.frag_word0 =
2475 htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
2476 rfd->rfd_status = 0;
2477
2478 return (0);
2479}
2480
2481/*
2482 * stge_set_filter:
2483 *
2484 * Set up the receive filter.
2485 */
2486static void
2487stge_set_filter(struct stge_softc *sc)
2488{
2489 struct ifnet *ifp;
2490 uint16_t mode;
2491
2492 STGE_LOCK_ASSERT(sc);
2493
2494 ifp = sc->sc_ifp;
2495
2496 mode = CSR_READ_2(sc, STGE_ReceiveMode);
2497 mode |= RM_ReceiveUnicast;
2498 if ((ifp->if_flags & IFF_BROADCAST) != 0)
2499 mode |= RM_ReceiveBroadcast;
2500 else
2501 mode &= ~RM_ReceiveBroadcast;
2502 if ((ifp->if_flags & IFF_PROMISC) != 0)
2503 mode |= RM_ReceiveAllFrames;
2504 else
2505 mode &= ~RM_ReceiveAllFrames;
2506
2507 CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2508}
2509
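/*
 * stge_set_multi:
 *
 *	Program the 64-bit multicast hash filter.
 */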
2510static void
2511stge_set_multi(struct stge_softc *sc)
2512{
2513 struct ifnet *ifp;
2514 struct ifmultiaddr *ifma;
2515 uint32_t crc;
2516 uint32_t mchash[2];
2517 uint16_t mode;
2518 int count;
2519
2520 STGE_LOCK_ASSERT(sc);
2521
2522 ifp = sc->sc_ifp;
2523
2524 mode = CSR_READ_2(sc, STGE_ReceiveMode);
2525 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2526 if ((ifp->if_flags & IFF_PROMISC) != 0)
2527 mode |= RM_ReceiveAllFrames;
2528 else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2529 mode |= RM_ReceiveMulticast;
2530 CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2531 return;
2532 }
2533
2534 /* clear existing filters. */
2535 CSR_WRITE_4(sc, STGE_HashTable0, 0);
2536 CSR_WRITE_4(sc, STGE_HashTable1, 0);
2537
2538 /*
2539 * Set up the multicast address filter by passing all multicast
2540 * addresses through a CRC generator, and then using the low-order
2541 * 6 bits as an index into the 64 bit multicast hash table. The
2542 * high order bits select the register, while the rest of the bits
2543 * select the bit within the register.
2544 */
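	/*
	 * E.g. crc & 0x3f == 0x23 selects register mchash[0x23 >> 5],
	 * i.e. mchash[1], and bit 0x23 & 0x1f, i.e. bit 3.
	 */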
2545
2546 bzero(mchash, sizeof(mchash));
2547
2548 count = 0;
2549	if_maddr_rlock(ifp);
2550	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2551 if (ifma->ifma_addr->sa_family != AF_LINK)
2552 continue;
2553 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2554 ifma->ifma_addr), ETHER_ADDR_LEN);
2555
2556 /* Just want the 6 least significant bits. */
2557 crc &= 0x3f;
2558
2559 /* Set the corresponding bit in the hash table. */
2560 mchash[crc >> 5] |= 1 << (crc & 0x1f);
2561 count++;
2562 }
2563 if_maddr_runlock(ifp);
2564
2565 mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
2566 if (count > 0)
2567 mode |= RM_ReceiveMulticastHash;
2568 else
2569 mode &= ~RM_ReceiveMulticastHash;
2570
2571 CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
2572 CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
2573 CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2574}
2575
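/*
 * sysctl_int_range:
 *
 *	Sysctl helper; accept a new value only if it lies in [low, high].
 */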
2576static int
2577sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2578{
2579 int error, value;
2580
2581 if (!arg1)
2582 return (EINVAL);
2583 value = *(int *)arg1;
2584 error = sysctl_handle_int(oidp, &value, 0, req);
2585 if (error || !req->newptr)
2586 return (error);
2587 if (value < low || value > high)
2588 return (EINVAL);
2589 *(int *)arg1 = value;
2590
2591 return (0);
2592}
2593
2594static int
2595sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
2596{
2597 return (sysctl_int_range(oidp, arg1, arg2, req,
2598 STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
2599}
2600
2601static int
2602sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
2603{
2604 return (sysctl_int_range(oidp, arg1, arg2, req,
2605 STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
2606}
95 { VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST1023,
96 "Sundance ST-1023 Gigabit Ethernet" },
97
98 { VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST2021,
99 "Sundance ST-2021 Gigabit Ethernet" },
100
101 { VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021,
102 "Tamarack TC9021 Gigabit Ethernet" },
103
104 { VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021_ALT,
105 "Tamarack TC9021 Gigabit Ethernet" },
106
107 /*
108 * The Sundance sample boards use the Sundance vendor ID,
109 * but the Tamarack product ID.
110 */
111 { VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021,
112 "Sundance TC9021 Gigabit Ethernet" },
113
114 { VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021_ALT,
115 "Sundance TC9021 Gigabit Ethernet" },
116
117 { VENDOR_DLINK, DEVICEID_DLINK_DL4000,
118 "D-Link DL-4000 Gigabit Ethernet" },
119
120 { VENDOR_ANTARES, DEVICEID_ANTARES_TC9021,
121 "Antares Gigabit Ethernet" }
122};
123
124static int stge_probe(device_t);
125static int stge_attach(device_t);
126static int stge_detach(device_t);
127static int stge_shutdown(device_t);
128static int stge_suspend(device_t);
129static int stge_resume(device_t);
130
131static int stge_encap(struct stge_softc *, struct mbuf **);
132static void stge_start(struct ifnet *);
133static void stge_start_locked(struct ifnet *);
134static void stge_watchdog(struct stge_softc *);
135static int stge_ioctl(struct ifnet *, u_long, caddr_t);
136static void stge_init(void *);
137static void stge_init_locked(struct stge_softc *);
138static void stge_vlan_setup(struct stge_softc *);
139static void stge_stop(struct stge_softc *);
140static void stge_start_tx(struct stge_softc *);
141static void stge_start_rx(struct stge_softc *);
142static void stge_stop_tx(struct stge_softc *);
143static void stge_stop_rx(struct stge_softc *);
144
145static void stge_reset(struct stge_softc *, uint32_t);
146static int stge_eeprom_wait(struct stge_softc *);
147static void stge_read_eeprom(struct stge_softc *, int, uint16_t *);
148static void stge_tick(void *);
149static void stge_stats_update(struct stge_softc *);
150static void stge_set_filter(struct stge_softc *);
151static void stge_set_multi(struct stge_softc *);
152
153static void stge_link_task(void *, int);
154static void stge_intr(void *);
155static __inline int stge_tx_error(struct stge_softc *);
156static void stge_txeof(struct stge_softc *);
157static int stge_rxeof(struct stge_softc *);
158static __inline void stge_discard_rxbuf(struct stge_softc *, int);
159static int stge_newbuf(struct stge_softc *, int);
160#ifndef __NO_STRICT_ALIGNMENT
161static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
162#endif
163
164static int stge_miibus_readreg(device_t, int, int);
165static int stge_miibus_writereg(device_t, int, int, int);
166static void stge_miibus_statchg(device_t);
167static int stge_mediachange(struct ifnet *);
168static void stge_mediastatus(struct ifnet *, struct ifmediareq *);
169
170static void stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
171static int stge_dma_alloc(struct stge_softc *);
172static void stge_dma_free(struct stge_softc *);
173static void stge_dma_wait(struct stge_softc *);
174static void stge_init_tx_ring(struct stge_softc *);
175static int stge_init_rx_ring(struct stge_softc *);
176#ifdef DEVICE_POLLING
177static int stge_poll(struct ifnet *, enum poll_cmd, int);
178#endif
179
180static void stge_setwol(struct stge_softc *);
181static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
182static int sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
183static int sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
184
185/*
186 * MII bit-bang glue
187 */
188static uint32_t stge_mii_bitbang_read(device_t);
189static void stge_mii_bitbang_write(device_t, uint32_t);
190
191static const struct mii_bitbang_ops stge_mii_bitbang_ops = {
192 stge_mii_bitbang_read,
193 stge_mii_bitbang_write,
194 {
195 PC_MgmtData, /* MII_BIT_MDO */
196 PC_MgmtData, /* MII_BIT_MDI */
197 PC_MgmtClk, /* MII_BIT_MDC */
198 PC_MgmtDir, /* MII_BIT_DIR_HOST_PHY */
199 0, /* MII_BIT_DIR_PHY_HOST */
200 }
201};
202
203static device_method_t stge_methods[] = {
204 /* Device interface */
205 DEVMETHOD(device_probe, stge_probe),
206 DEVMETHOD(device_attach, stge_attach),
207 DEVMETHOD(device_detach, stge_detach),
208 DEVMETHOD(device_shutdown, stge_shutdown),
209 DEVMETHOD(device_suspend, stge_suspend),
210 DEVMETHOD(device_resume, stge_resume),
211
212 /* MII interface */
213 DEVMETHOD(miibus_readreg, stge_miibus_readreg),
214 DEVMETHOD(miibus_writereg, stge_miibus_writereg),
215 DEVMETHOD(miibus_statchg, stge_miibus_statchg),
216
217 DEVMETHOD_END
218};
219
220static driver_t stge_driver = {
221 "stge",
222 stge_methods,
223 sizeof(struct stge_softc)
224};
225
226static devclass_t stge_devclass;
227
228DRIVER_MODULE(stge, pci, stge_driver, stge_devclass, 0, 0);
229DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);
230
231static struct resource_spec stge_res_spec_io[] = {
232 { SYS_RES_IOPORT, PCIR_BAR(0), RF_ACTIVE },
233 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
234 { -1, 0, 0 }
235};
236
237static struct resource_spec stge_res_spec_mem[] = {
238 { SYS_RES_MEMORY, PCIR_BAR(1), RF_ACTIVE },
239 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
240 { -1, 0, 0 }
241};
242
243/*
244 * stge_mii_bitbang_read: [mii bit-bang interface function]
245 *
246 * Read the MII serial port for the MII bit-bang module.
247 */
248static uint32_t
249stge_mii_bitbang_read(device_t dev)
250{
251 struct stge_softc *sc;
252 uint32_t val;
253
254 sc = device_get_softc(dev);
255
256 val = CSR_READ_1(sc, STGE_PhyCtrl);
257 CSR_BARRIER(sc, STGE_PhyCtrl, 1,
258 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
259 return (val);
260}
261
262/*
263 * stge_mii_bitbang_write: [mii bit-bang interface function]
264 *
265 * Write the MII serial port for the MII bit-bang module.
266 */
267static void
268stge_mii_bitbang_write(device_t dev, uint32_t val)
269{
270 struct stge_softc *sc;
271
272 sc = device_get_softc(dev);
273
274 CSR_WRITE_1(sc, STGE_PhyCtrl, val);
275 CSR_BARRIER(sc, STGE_PhyCtrl, 1,
276 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
277}
278
279/*
280 * stge_miibus_readreg: [mii interface function]
281 *
282 * Read a PHY register on the MII of the TC9021.
283 */
284static int
285stge_miibus_readreg(device_t dev, int phy, int reg)
286{
287 struct stge_softc *sc;
288 int error, val;
289
290 sc = device_get_softc(dev);
291
292 if (reg == STGE_PhyCtrl) {
293		/* XXX allow ip1000phy to read the STGE_PhyCtrl register. */
294 STGE_MII_LOCK(sc);
295 error = CSR_READ_1(sc, STGE_PhyCtrl);
296 STGE_MII_UNLOCK(sc);
297 return (error);
298 }
299
300 STGE_MII_LOCK(sc);
301 val = mii_bitbang_readreg(dev, &stge_mii_bitbang_ops, phy, reg);
302 STGE_MII_UNLOCK(sc);
303 return (val);
304}
305
306/*
307 * stge_miibus_writereg: [mii interface function]
308 *
309 * Write a PHY register on the MII of the TC9021.
310 */
311static int
312stge_miibus_writereg(device_t dev, int phy, int reg, int val)
313{
314 struct stge_softc *sc;
315
316 sc = device_get_softc(dev);
317
318 STGE_MII_LOCK(sc);
319 mii_bitbang_writereg(dev, &stge_mii_bitbang_ops, phy, reg, val);
320 STGE_MII_UNLOCK(sc);
321 return (0);
322}
323
324/*
325 * stge_miibus_statchg: [mii interface function]
326 *
327 * Callback from MII layer when media changes.
328 */
329static void
330stge_miibus_statchg(device_t dev)
331{
332 struct stge_softc *sc;
333
334 sc = device_get_softc(dev);
335 taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task);
336}
337
338/*
339 * stge_mediastatus: [ifmedia interface function]
340 *
341 * Get the current interface media status.
342 */
343static void
344stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
345{
346 struct stge_softc *sc;
347 struct mii_data *mii;
348
349 sc = ifp->if_softc;
350 mii = device_get_softc(sc->sc_miibus);
351
352 mii_pollstat(mii);
353 ifmr->ifm_status = mii->mii_media_status;
354 ifmr->ifm_active = mii->mii_media_active;
355}
356
357/*
358 * stge_mediachange: [ifmedia interface function]
359 *
360 * Set hardware to newly-selected media.
361 */
362static int
363stge_mediachange(struct ifnet *ifp)
364{
365 struct stge_softc *sc;
366 struct mii_data *mii;
367
368 sc = ifp->if_softc;
369 mii = device_get_softc(sc->sc_miibus);
370 mii_mediachg(mii);
371
372 return (0);
373}
374
375static int
376stge_eeprom_wait(struct stge_softc *sc)
377{
378 int i;
379
380 for (i = 0; i < STGE_TIMEOUT; i++) {
381 DELAY(1000);
382 if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
383 return (0);
384 }
385 return (1);
386}
387
388/*
389 * stge_read_eeprom:
390 *
391 * Read data from the serial EEPROM.
392 */
393static void
394stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
395{
396
397 if (stge_eeprom_wait(sc))
398 device_printf(sc->sc_dev, "EEPROM failed to come ready\n");
399
400 CSR_WRITE_2(sc, STGE_EepromCtrl,
401 EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
402 if (stge_eeprom_wait(sc))
403 device_printf(sc->sc_dev, "EEPROM read timed out\n");
404 *data = CSR_READ_2(sc, STGE_EepromData);
405}
406
407
408static int
409stge_probe(device_t dev)
410{
411 const struct stge_product *sp;
412 int i;
413 uint16_t vendor, devid;
414
415 vendor = pci_get_vendor(dev);
416 devid = pci_get_device(dev);
417 sp = stge_products;
418 for (i = 0; i < sizeof(stge_products)/sizeof(stge_products[0]);
419 i++, sp++) {
420 if (vendor == sp->stge_vendorid &&
421 devid == sp->stge_deviceid) {
422 device_set_desc(dev, sp->stge_name);
423 return (BUS_PROBE_DEFAULT);
424 }
425 }
426
427 return (ENXIO);
428}
429
430static int
431stge_attach(device_t dev)
432{
433 struct stge_softc *sc;
434 struct ifnet *ifp;
435 uint8_t enaddr[ETHER_ADDR_LEN];
436 int error, flags, i;
437 uint16_t cmd;
438 uint32_t val;
439
440 error = 0;
441 sc = device_get_softc(dev);
442 sc->sc_dev = dev;
443
444 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
445 MTX_DEF);
446 mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF);
447 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
448 TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc);
449
450 /*
451 * Map the device.
452 */
453 pci_enable_busmaster(dev);
454 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
455 val = pci_read_config(dev, PCIR_BAR(1), 4);
456 if ((val & 0x01) != 0)
457 sc->sc_spec = stge_res_spec_mem;
458 else {
459 val = pci_read_config(dev, PCIR_BAR(0), 4);
460 if ((val & 0x01) == 0) {
461 device_printf(sc->sc_dev, "couldn't locate IO BAR\n");
462 error = ENXIO;
463 goto fail;
464 }
465 sc->sc_spec = stge_res_spec_io;
466 }
467 error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res);
468 if (error != 0) {
469 device_printf(dev, "couldn't allocate %s resources\n",
470 sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O");
471 goto fail;
472 }
473 sc->sc_rev = pci_get_revid(dev);
474
475 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
476 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
477 "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
478 sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");
479
480 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
481 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
482 "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
483 sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");
484
485 /* Pull in device tunables. */
486 sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
487 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
488 "rxint_nframe", &sc->sc_rxint_nframe);
489 if (error == 0) {
490 if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN ||
491 sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) {
492 device_printf(dev, "rxint_nframe value out of range; "
493 "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT);
494 sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
495 }
496 }
497
498 sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
499 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
500 "rxint_dmawait", &sc->sc_rxint_dmawait);
501 if (error == 0) {
502 if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN ||
503 sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) {
504 device_printf(dev, "rxint_dmawait value out of range; "
505 "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT);
506 sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
507 }
508 }
509
510	if ((error = stge_dma_alloc(sc)) != 0)
511 goto fail;
512
513 /*
514 * Determine if we're copper or fiber. It affects how we
515 * reset the card.
516 */
517 if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
518 sc->sc_usefiber = 1;
519 else
520 sc->sc_usefiber = 0;
521
522 /* Load LED configuration from EEPROM. */
523 stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);
524
525 /*
526 * Reset the chip to a known state.
527 */
528 STGE_LOCK(sc);
529 stge_reset(sc, STGE_RESET_FULL);
530 STGE_UNLOCK(sc);
531
532 /*
533 * Reading the station address from the EEPROM doesn't seem
534 * to work, at least on my sample boards. Instead, since
535 * the reset sequence does AutoInit, read it from the station
536	 * address registers. On the Sundance ST-1023, however, it can
537	 * only be read from the EEPROM.
538 */
539 if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
540 uint16_t v;
541
542 v = CSR_READ_2(sc, STGE_StationAddress0);
543 enaddr[0] = v & 0xff;
544 enaddr[1] = v >> 8;
545 v = CSR_READ_2(sc, STGE_StationAddress1);
546 enaddr[2] = v & 0xff;
547 enaddr[3] = v >> 8;
548 v = CSR_READ_2(sc, STGE_StationAddress2);
549 enaddr[4] = v & 0xff;
550 enaddr[5] = v >> 8;
551 sc->sc_stge1023 = 0;
552 } else {
553 uint16_t myaddr[ETHER_ADDR_LEN / 2];
554		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
555 stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
556 &myaddr[i]);
557 myaddr[i] = le16toh(myaddr[i]);
558 }
559 bcopy(myaddr, enaddr, sizeof(enaddr));
560 sc->sc_stge1023 = 1;
561 }
562
563 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
564 if (ifp == NULL) {
565 device_printf(sc->sc_dev, "failed to if_alloc()\n");
566 error = ENXIO;
567 goto fail;
568 }
569
570 ifp->if_softc = sc;
571 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
572 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
573 ifp->if_ioctl = stge_ioctl;
574 ifp->if_start = stge_start;
575 ifp->if_init = stge_init;
576 ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1;
577 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
578 IFQ_SET_READY(&ifp->if_snd);
579	/* Revision B3 and earlier chips have a checksum bug. */
580 if (sc->sc_rev >= 0x0c) {
581 ifp->if_hwassist = STGE_CSUM_FEATURES;
582 ifp->if_capabilities = IFCAP_HWCSUM;
583 } else {
584 ifp->if_hwassist = 0;
585 ifp->if_capabilities = 0;
586 }
587 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
588 ifp->if_capenable = ifp->if_capabilities;
589
590 /*
591 * Read some important bits from the PhyCtrl register.
592 */
593 sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
594 (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);
595
596 /* Set up MII bus. */
597 flags = MIIF_DOPAUSE;
598 if (sc->sc_rev >= 0x40 && sc->sc_rev <= 0x4e)
599 flags |= MIIF_MACPRIV0;
600 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, stge_mediachange,
601 stge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
602 flags);
603 if (error != 0) {
604 device_printf(sc->sc_dev, "attaching PHYs failed\n");
605 goto fail;
606 }
607
608 ether_ifattach(ifp, enaddr);
609
610 /* VLAN capability setup */
611 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
612 if (sc->sc_rev >= 0x0c)
613 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
614 ifp->if_capenable = ifp->if_capabilities;
615#ifdef DEVICE_POLLING
616 ifp->if_capabilities |= IFCAP_POLLING;
617#endif
618 /*
619 * Tell the upper layer(s) we support long frames.
620 * Must appear after the call to ether_ifattach() because
621 * ether_ifattach() sets ifi_hdrlen to the default value.
622 */
623 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
624
625 /*
626 * The manual recommends disabling early transmit, so we
627	 * do. It's disabled anyway if IP checksumming is in use,
628 * since the entire packet must be in the FIFO in order
629 * for the chip to perform the checksum.
630 */
631 sc->sc_txthresh = 0x0fff;
632
633 /*
634 * Disable MWI if the PCI layer tells us to.
635 */
636 sc->sc_DMACtrl = 0;
637 if ((cmd & PCIM_CMD_MWRICEN) == 0)
638 sc->sc_DMACtrl |= DMAC_MWIDisable;
639
640 /*
641	 * Hook up the IRQ.
642 */
643 error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
644 NULL, stge_intr, sc, &sc->sc_ih);
645 if (error != 0) {
646 ether_ifdetach(ifp);
647 device_printf(sc->sc_dev, "couldn't set up IRQ\n");
648 sc->sc_ifp = NULL;
649 goto fail;
650 }
651
652fail:
653 if (error != 0)
654 stge_detach(dev);
655
656 return (error);
657}
658
659static int
660stge_detach(device_t dev)
661{
662 struct stge_softc *sc;
663 struct ifnet *ifp;
664
665 sc = device_get_softc(dev);
666
667 ifp = sc->sc_ifp;
668#ifdef DEVICE_POLLING
669 if (ifp && ifp->if_capenable & IFCAP_POLLING)
670 ether_poll_deregister(ifp);
671#endif
672 if (device_is_attached(dev)) {
673 STGE_LOCK(sc);
674 /* XXX */
675 sc->sc_detach = 1;
676 stge_stop(sc);
677 STGE_UNLOCK(sc);
678 callout_drain(&sc->sc_tick_ch);
679 taskqueue_drain(taskqueue_swi, &sc->sc_link_task);
680 ether_ifdetach(ifp);
681 }
682
683 if (sc->sc_miibus != NULL) {
684 device_delete_child(dev, sc->sc_miibus);
685 sc->sc_miibus = NULL;
686 }
687 bus_generic_detach(dev);
688 stge_dma_free(sc);
689
690 if (ifp != NULL) {
691 if_free(ifp);
692 sc->sc_ifp = NULL;
693 }
694
695 if (sc->sc_ih) {
696 bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih);
697 sc->sc_ih = NULL;
698 }
699 bus_release_resources(dev, sc->sc_spec, sc->sc_res);
700
701 mtx_destroy(&sc->sc_mii_mtx);
702 mtx_destroy(&sc->sc_mtx);
703
704 return (0);
705}
706
707struct stge_dmamap_arg {
708 bus_addr_t stge_busaddr;
709};
710
711static void
712stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
713{
714 struct stge_dmamap_arg *ctx;
715
716 if (error != 0)
717 return;
718
719 ctx = (struct stge_dmamap_arg *)arg;
720 ctx->stge_busaddr = segs[0].ds_addr;
721}
722
723static int
724stge_dma_alloc(struct stge_softc *sc)
725{
726 struct stge_dmamap_arg ctx;
727 struct stge_txdesc *txd;
728 struct stge_rxdesc *rxd;
729 int error, i;
730
731 /* create parent tag. */
732 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */
733 1, 0, /* algnmnt, boundary */
734 STGE_DMA_MAXADDR, /* lowaddr */
735 BUS_SPACE_MAXADDR, /* highaddr */
736 NULL, NULL, /* filter, filterarg */
737 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
738 0, /* nsegments */
739 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
740 0, /* flags */
741 NULL, NULL, /* lockfunc, lockarg */
742 &sc->sc_cdata.stge_parent_tag);
743 if (error != 0) {
744 device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
745 goto fail;
746 }
747 /* create tag for Tx ring. */
748 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
749 STGE_RING_ALIGN, 0, /* algnmnt, boundary */
750 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
751 BUS_SPACE_MAXADDR, /* highaddr */
752 NULL, NULL, /* filter, filterarg */
753 STGE_TX_RING_SZ, /* maxsize */
754 1, /* nsegments */
755 STGE_TX_RING_SZ, /* maxsegsize */
756 0, /* flags */
757 NULL, NULL, /* lockfunc, lockarg */
758 &sc->sc_cdata.stge_tx_ring_tag);
759 if (error != 0) {
760 device_printf(sc->sc_dev,
761 "failed to allocate Tx ring DMA tag\n");
762 goto fail;
763 }
764
765 /* create tag for Rx ring. */
766 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
767 STGE_RING_ALIGN, 0, /* algnmnt, boundary */
768 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
769 BUS_SPACE_MAXADDR, /* highaddr */
770 NULL, NULL, /* filter, filterarg */
771 STGE_RX_RING_SZ, /* maxsize */
772 1, /* nsegments */
773 STGE_RX_RING_SZ, /* maxsegsize */
774 0, /* flags */
775 NULL, NULL, /* lockfunc, lockarg */
776 &sc->sc_cdata.stge_rx_ring_tag);
777 if (error != 0) {
778 device_printf(sc->sc_dev,
779 "failed to allocate Rx ring DMA tag\n");
780 goto fail;
781 }
782
783 /* create tag for Tx buffers. */
784 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
785 1, 0, /* algnmnt, boundary */
786 BUS_SPACE_MAXADDR, /* lowaddr */
787 BUS_SPACE_MAXADDR, /* highaddr */
788 NULL, NULL, /* filter, filterarg */
789 MCLBYTES * STGE_MAXTXSEGS, /* maxsize */
790 STGE_MAXTXSEGS, /* nsegments */
791 MCLBYTES, /* maxsegsize */
792 0, /* flags */
793 NULL, NULL, /* lockfunc, lockarg */
794 &sc->sc_cdata.stge_tx_tag);
795 if (error != 0) {
796 device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
797 goto fail;
798 }
799
800 /* create tag for Rx buffers. */
801 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
802 1, 0, /* algnmnt, boundary */
803 BUS_SPACE_MAXADDR, /* lowaddr */
804 BUS_SPACE_MAXADDR, /* highaddr */
805 NULL, NULL, /* filter, filterarg */
806 MCLBYTES, /* maxsize */
807 1, /* nsegments */
808 MCLBYTES, /* maxsegsize */
809 0, /* flags */
810 NULL, NULL, /* lockfunc, lockarg */
811 &sc->sc_cdata.stge_rx_tag);
812 if (error != 0) {
813 device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
814 goto fail;
815 }
816
817 /* allocate DMA'able memory and load the DMA map for Tx ring. */
818 error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
819 (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT |
820 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_tx_ring_map);
821 if (error != 0) {
822 device_printf(sc->sc_dev,
823 "failed to allocate DMA'able memory for Tx ring\n");
824 goto fail;
825 }
826
827 ctx.stge_busaddr = 0;
828 error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
829 sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
830 STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
831 if (error != 0 || ctx.stge_busaddr == 0) {
832 device_printf(sc->sc_dev,
833 "failed to load DMA'able memory for Tx ring\n");
834 goto fail;
835 }
836 sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;
837
838 /* allocate DMA'able memory and load the DMA map for Rx ring. */
839 error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
840 (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT |
841 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_rx_ring_map);
842 if (error != 0) {
843 device_printf(sc->sc_dev,
844 "failed to allocate DMA'able memory for Rx ring\n");
845 goto fail;
846 }
847
848 ctx.stge_busaddr = 0;
849 error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
850 sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
851 STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
852 if (error != 0 || ctx.stge_busaddr == 0) {
853 device_printf(sc->sc_dev,
854 "failed to load DMA'able memory for Rx ring\n");
855 goto fail;
856 }
857 sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;
858
859 /* create DMA maps for Tx buffers. */
860 for (i = 0; i < STGE_TX_RING_CNT; i++) {
861 txd = &sc->sc_cdata.stge_txdesc[i];
862 txd->tx_m = NULL;
863 txd->tx_dmamap = 0;
864 error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
865 &txd->tx_dmamap);
866 if (error != 0) {
867 device_printf(sc->sc_dev,
868 "failed to create Tx dmamap\n");
869 goto fail;
870 }
871 }
872 /* create DMA maps for Rx buffers. */
873 if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
874 &sc->sc_cdata.stge_rx_sparemap)) != 0) {
875 device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
876 goto fail;
877 }
878 for (i = 0; i < STGE_RX_RING_CNT; i++) {
879 rxd = &sc->sc_cdata.stge_rxdesc[i];
880 rxd->rx_m = NULL;
881 rxd->rx_dmamap = 0;
882 error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
883 &rxd->rx_dmamap);
884 if (error != 0) {
885 device_printf(sc->sc_dev,
886 "failed to create Rx dmamap\n");
887 goto fail;
888 }
889 }
890
891fail:
892 return (error);
893}
894
895static void
896stge_dma_free(struct stge_softc *sc)
897{
898 struct stge_txdesc *txd;
899 struct stge_rxdesc *rxd;
900 int i;
901
902 /* Tx ring */
903 if (sc->sc_cdata.stge_tx_ring_tag) {
904 if (sc->sc_cdata.stge_tx_ring_map)
905 bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
906 sc->sc_cdata.stge_tx_ring_map);
907 if (sc->sc_cdata.stge_tx_ring_map &&
908 sc->sc_rdata.stge_tx_ring)
909 bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
910 sc->sc_rdata.stge_tx_ring,
911 sc->sc_cdata.stge_tx_ring_map);
912 sc->sc_rdata.stge_tx_ring = NULL;
913 sc->sc_cdata.stge_tx_ring_map = 0;
914 bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
915 sc->sc_cdata.stge_tx_ring_tag = NULL;
916 }
917 /* Rx ring */
918 if (sc->sc_cdata.stge_rx_ring_tag) {
919 if (sc->sc_cdata.stge_rx_ring_map)
920 bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
921 sc->sc_cdata.stge_rx_ring_map);
922 if (sc->sc_cdata.stge_rx_ring_map &&
923 sc->sc_rdata.stge_rx_ring)
924 bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
925 sc->sc_rdata.stge_rx_ring,
926 sc->sc_cdata.stge_rx_ring_map);
927 sc->sc_rdata.stge_rx_ring = NULL;
928 sc->sc_cdata.stge_rx_ring_map = 0;
929 bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
930 sc->sc_cdata.stge_rx_ring_tag = NULL;
931 }
932 /* Tx buffers */
933 if (sc->sc_cdata.stge_tx_tag) {
934 for (i = 0; i < STGE_TX_RING_CNT; i++) {
935 txd = &sc->sc_cdata.stge_txdesc[i];
936 if (txd->tx_dmamap) {
937 bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
938 txd->tx_dmamap);
939 txd->tx_dmamap = 0;
940 }
941 }
942 bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
943 sc->sc_cdata.stge_tx_tag = NULL;
944 }
945 /* Rx buffers */
946 if (sc->sc_cdata.stge_rx_tag) {
947 for (i = 0; i < STGE_RX_RING_CNT; i++) {
948 rxd = &sc->sc_cdata.stge_rxdesc[i];
949 if (rxd->rx_dmamap) {
950 bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
951 rxd->rx_dmamap);
952 rxd->rx_dmamap = 0;
953 }
954 }
955 if (sc->sc_cdata.stge_rx_sparemap) {
956 bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
957 sc->sc_cdata.stge_rx_sparemap);
958 sc->sc_cdata.stge_rx_sparemap = 0;
959 }
960 bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
961 sc->sc_cdata.stge_rx_tag = NULL;
962 }
963
964 if (sc->sc_cdata.stge_parent_tag) {
965 bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
966 sc->sc_cdata.stge_parent_tag = NULL;
967 }
968}
969
970/*
971 * stge_shutdown:
972 *
973 * Make sure the interface is stopped at reboot time.
974 */
975static int
976stge_shutdown(device_t dev)
977{
978
979 return (stge_suspend(dev));
980}
981
982static void
983stge_setwol(struct stge_softc *sc)
984{
985 struct ifnet *ifp;
986 uint8_t v;
987
988 STGE_LOCK_ASSERT(sc);
989
990 ifp = sc->sc_ifp;
991 v = CSR_READ_1(sc, STGE_WakeEvent);
992 /* Disable all WOL bits. */
993 v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
994 WE_WakeOnLanEnable);
995 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
996 v |= WE_MagicPktEnable | WE_WakeOnLanEnable;
997 CSR_WRITE_1(sc, STGE_WakeEvent, v);
998 /* Reset Tx and prevent transmission. */
999 CSR_WRITE_4(sc, STGE_AsicCtrl,
1000 CSR_READ_4(sc, STGE_AsicCtrl) | AC_TxReset);
1001 /*
1002	 * The TC9021 automatically resets its link speed to 100Mbps when
1003	 * put to sleep, so there is no need to reset the link speed here.
1004 */
1005}
1006
1007static int
1008stge_suspend(device_t dev)
1009{
1010 struct stge_softc *sc;
1011
1012 sc = device_get_softc(dev);
1013
1014 STGE_LOCK(sc);
1015 stge_stop(sc);
1016 sc->sc_suspended = 1;
1017 stge_setwol(sc);
1018 STGE_UNLOCK(sc);
1019
1020 return (0);
1021}
1022
1023static int
1024stge_resume(device_t dev)
1025{
1026 struct stge_softc *sc;
1027 struct ifnet *ifp;
1028 uint8_t v;
1029
1030 sc = device_get_softc(dev);
1031
1032 STGE_LOCK(sc);
1033 /*
1034	 * Clear the WOL bits so that special frames no longer
1035	 * interfere with normal Rx operation.
1036 */
1037 v = CSR_READ_1(sc, STGE_WakeEvent);
1038 v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
1039 WE_WakeOnLanEnable);
1040 CSR_WRITE_1(sc, STGE_WakeEvent, v);
1041 ifp = sc->sc_ifp;
1042 if (ifp->if_flags & IFF_UP)
1043 stge_init_locked(sc);
1044
1045 sc->sc_suspended = 0;
1046 STGE_UNLOCK(sc);
1047
1048 return (0);
1049}
1050
1051static void
1052stge_dma_wait(struct stge_softc *sc)
1053{
1054 int i;
1055
1056 for (i = 0; i < STGE_TIMEOUT; i++) {
1057 DELAY(2);
1058 if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
1059 break;
1060 }
1061
1062 if (i == STGE_TIMEOUT)
1063 device_printf(sc->sc_dev, "DMA wait timed out\n");
1064}
1065
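/*
 * stge_encap:
 *
 *	Map an mbuf chain into the Tx ring and construct its transmit
 *	frame descriptor.
 */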
1066static int
1067stge_encap(struct stge_softc *sc, struct mbuf **m_head)
1068{
1069 struct stge_txdesc *txd;
1070 struct stge_tfd *tfd;
1071 struct mbuf *m;
1072 bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
1073 int error, i, nsegs, si;
1074 uint64_t csum_flags, tfc;
1075
1076 STGE_LOCK_ASSERT(sc);
1077
1078 if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
1079 return (ENOBUFS);
1080
1081 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
1082 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1083 if (error == EFBIG) {
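		/*
		 * The chain needs more than STGE_MAXTXSEGS segments;
		 * coalesce it and retry the load once.
		 */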
1084 m = m_collapse(*m_head, M_DONTWAIT, STGE_MAXTXSEGS);
1085 if (m == NULL) {
1086 m_freem(*m_head);
1087 *m_head = NULL;
1088 return (ENOMEM);
1089 }
1090 *m_head = m;
1091 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
1092 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1093 if (error != 0) {
1094 m_freem(*m_head);
1095 *m_head = NULL;
1096 return (error);
1097 }
1098 } else if (error != 0)
1099 return (error);
1100 if (nsegs == 0) {
1101 m_freem(*m_head);
1102 *m_head = NULL;
1103 return (EIO);
1104 }
1105
1106 m = *m_head;
1107 csum_flags = 0;
1108 if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
1109 if (m->m_pkthdr.csum_flags & CSUM_IP)
1110 csum_flags |= TFD_IPChecksumEnable;
1111 if (m->m_pkthdr.csum_flags & CSUM_TCP)
1112 csum_flags |= TFD_TCPChecksumEnable;
1113 else if (m->m_pkthdr.csum_flags & CSUM_UDP)
1114 csum_flags |= TFD_UDPChecksumEnable;
1115 }
1116
1117 si = sc->sc_cdata.stge_tx_prod;
1118 tfd = &sc->sc_rdata.stge_tx_ring[si];
1119 for (i = 0; i < nsegs; i++)
1120 tfd->tfd_frags[i].frag_word0 =
1121 htole64(FRAG_ADDR(txsegs[i].ds_addr) |
1122 FRAG_LEN(txsegs[i].ds_len));
1123 sc->sc_cdata.stge_tx_cnt++;
1124
1125 tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
1126 TFD_FragCount(nsegs) | csum_flags;
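	/* Request a Tx DMA completion indication once the ring is nearly full. */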
1127 if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
1128 tfc |= TFD_TxDMAIndicate;
1129
1130 /* Update producer index. */
1131 sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;
1132
1133 /* Check if we have a VLAN tag to insert. */
1134 if (m->m_flags & M_VLANTAG)
1135 tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag));
1136 tfd->tfd_control = htole64(tfc);
1137
1138 /* Update Tx Queue. */
1139 STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
1140 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
1141 txd->tx_m = m;
1142
1143 /* Sync descriptors. */
1144 bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1145 BUS_DMASYNC_PREWRITE);
1146 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1147 sc->sc_cdata.stge_tx_ring_map,
1148 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1149
1150 return (0);
1151}
1152
1153/*
1154 * stge_start: [ifnet interface function]
1155 *
1156 * Start packet transmission on the interface.
1157 */
1158static void
1159stge_start(struct ifnet *ifp)
1160{
1161 struct stge_softc *sc;
1162
1163 sc = ifp->if_softc;
1164 STGE_LOCK(sc);
1165 stge_start_locked(ifp);
1166 STGE_UNLOCK(sc);
1167}
1168
1169static void
1170stge_start_locked(struct ifnet *ifp)
1171{
1172 struct stge_softc *sc;
1173 struct mbuf *m_head;
1174 int enq;
1175
1176 sc = ifp->if_softc;
1177
1178 STGE_LOCK_ASSERT(sc);
1179
1180 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1181 IFF_DRV_RUNNING || sc->sc_link == 0)
1182 return;
1183
1184 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1185 if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
1186 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1187 break;
1188 }
1189
1190 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1191 if (m_head == NULL)
1192 break;
1193 /*
1194 * Pack the data into the transmit ring. If we
1195 * don't have room, set the OACTIVE flag and wait
1196 * for the NIC to drain the ring.
1197 */
1198 if (stge_encap(sc, &m_head)) {
1199 if (m_head == NULL)
1200 break;
1201 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1202 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1203 break;
1204 }
1205
1206 enq++;
1207 /*
1208 * If there's a BPF listener, bounce a copy of this frame
1209 * to him.
1210 */
1211 ETHER_BPF_MTAP(ifp, m_head);
1212 }
1213
1214 if (enq > 0) {
1215 /* Transmit */
1216 CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);
1217
1218 /* Set a timeout in case the chip goes out to lunch. */
1219 sc->sc_watchdog_timer = 5;
1220 }
1221}
1222
1223/*
1224 * stge_watchdog:
1225 *
1226 * Watchdog timer handler.
1227 */
1228static void
1229stge_watchdog(struct stge_softc *sc)
1230{
1231 struct ifnet *ifp;
1232
1233 STGE_LOCK_ASSERT(sc);
1234
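	/* Do nothing if the timer is disarmed (0) or has not yet counted down to zero. */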
1235 if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
1236 return;
1237
1238 ifp = sc->sc_ifp;
1239 if_printf(sc->sc_ifp, "device timeout\n");
1240 ifp->if_oerrors++;
1241 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1242 stge_init_locked(sc);
1243 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1244 stge_start_locked(ifp);
1245}
1246
1247/*
1248 * stge_ioctl: [ifnet interface function]
1249 *
1250 * Handle control requests from the operator.
1251 */
1252static int
1253stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1254{
1255 struct stge_softc *sc;
1256 struct ifreq *ifr;
1257 struct mii_data *mii;
1258 int error, mask;
1259
1260 sc = ifp->if_softc;
1261 ifr = (struct ifreq *)data;
1262 error = 0;
1263 switch (cmd) {
1264 case SIOCSIFMTU:
1265 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
1266 error = EINVAL;
1267 else if (ifp->if_mtu != ifr->ifr_mtu) {
1268 ifp->if_mtu = ifr->ifr_mtu;
1269 STGE_LOCK(sc);
1270 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1271 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1272 stge_init_locked(sc);
1273 }
1274 STGE_UNLOCK(sc);
1275 }
1276 break;
1277 case SIOCSIFFLAGS:
1278 STGE_LOCK(sc);
1279 if ((ifp->if_flags & IFF_UP) != 0) {
1280 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1281 if (((ifp->if_flags ^ sc->sc_if_flags)
1282 & IFF_PROMISC) != 0)
1283 stge_set_filter(sc);
1284 } else {
1285 if (sc->sc_detach == 0)
1286 stge_init_locked(sc);
1287 }
1288 } else {
1289 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1290 stge_stop(sc);
1291 }
1292 sc->sc_if_flags = ifp->if_flags;
1293 STGE_UNLOCK(sc);
1294 break;
1295 case SIOCADDMULTI:
1296 case SIOCDELMULTI:
1297 STGE_LOCK(sc);
1298 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1299 stge_set_multi(sc);
1300 STGE_UNLOCK(sc);
1301 break;
1302 case SIOCSIFMEDIA:
1303 case SIOCGIFMEDIA:
1304 mii = device_get_softc(sc->sc_miibus);
1305 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1306 break;
1307 case SIOCSIFCAP:
1308 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1309#ifdef DEVICE_POLLING
1310 if ((mask & IFCAP_POLLING) != 0) {
1311 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1312 error = ether_poll_register(stge_poll, ifp);
1313 if (error != 0)
1314 break;
1315 STGE_LOCK(sc);
1316 CSR_WRITE_2(sc, STGE_IntEnable, 0);
1317 ifp->if_capenable |= IFCAP_POLLING;
1318 STGE_UNLOCK(sc);
1319 } else {
1320 error = ether_poll_deregister(ifp);
1321 if (error != 0)
1322 break;
1323 STGE_LOCK(sc);
1324 CSR_WRITE_2(sc, STGE_IntEnable,
1325 sc->sc_IntEnable);
1326 ifp->if_capenable &= ~IFCAP_POLLING;
1327 STGE_UNLOCK(sc);
1328 }
1329 }
1330#endif
1331 if ((mask & IFCAP_HWCSUM) != 0) {
1332 ifp->if_capenable ^= IFCAP_HWCSUM;
1333 if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
1334 (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
1335 ifp->if_hwassist = STGE_CSUM_FEATURES;
1336 else
1337 ifp->if_hwassist = 0;
1338 }
1339 if ((mask & IFCAP_WOL) != 0 &&
1340 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1341 if ((mask & IFCAP_WOL_MAGIC) != 0)
1342 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1343 }
1344 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
1345 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1346 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1347 STGE_LOCK(sc);
1348 stge_vlan_setup(sc);
1349 STGE_UNLOCK(sc);
1350 }
1351 }
1352 VLAN_CAPABILITIES(ifp);
1353 break;
1354 default:
1355 error = ether_ioctl(ifp, cmd, data);
1356 break;
1357 }
1358
1359 return (error);
1360}
1361
1362static void
1363stge_link_task(void *arg, int pending)
1364{
1365 struct stge_softc *sc;
1366 struct mii_data *mii;
1367 uint32_t v, ac;
1368 int i;
1369
1370 sc = (struct stge_softc *)arg;
1371 STGE_LOCK(sc);
1372
1373 mii = device_get_softc(sc->sc_miibus);
1374 if (mii->mii_media_status & IFM_ACTIVE) {
1375 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
1376 sc->sc_link = 1;
1377 } else
1378 sc->sc_link = 0;
1379
1380 sc->sc_MACCtrl = 0;
1381 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
1382 sc->sc_MACCtrl |= MC_DuplexSelect;
1383 if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_RXPAUSE) != 0)
1384 sc->sc_MACCtrl |= MC_RxFlowControlEnable;
1385 if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_TXPAUSE) != 0)
1386 sc->sc_MACCtrl |= MC_TxFlowControlEnable;
1387 /*
1388	 * Update the STGE_MACCtrl register depending on link status
1389	 * (duplex, flow control, etc.).
1390 */
1391 v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
1392 v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
1393 v |= sc->sc_MACCtrl;
1394 CSR_WRITE_4(sc, STGE_MACCtrl, v);
1395 if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
1396 /* Duplex setting changed, reset Tx/Rx functions. */
1397 ac = CSR_READ_4(sc, STGE_AsicCtrl);
1398 ac |= AC_TxReset | AC_RxReset;
1399 CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1400 for (i = 0; i < STGE_TIMEOUT; i++) {
1401 DELAY(100);
1402 if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1403 break;
1404 }
1405 if (i == STGE_TIMEOUT)
1406 device_printf(sc->sc_dev, "reset failed to complete\n");
1407 }
1408 STGE_UNLOCK(sc);
1409}
1410
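/*
 * stge_tx_error:
 *
 * Drain TxStatus and recover from transmit errors; re-enables the
 * Tx MAC after excessive/late collisions and returns -1 when a Tx
 * underrun requires a full reinitialization.
 */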
1411static __inline int
1412stge_tx_error(struct stge_softc *sc)
1413{
1414 uint32_t txstat;
1415 int error;
1416
1417 for (error = 0;;) {
1418 txstat = CSR_READ_4(sc, STGE_TxStatus);
1419 if ((txstat & TS_TxComplete) == 0)
1420 break;
1421 /* Tx underrun */
1422 if ((txstat & TS_TxUnderrun) != 0) {
1423 /*
1424 * XXX
1425		 * There should be a better way to recover
1426		 * from a Tx underrun than a full reset.
1427 */
1428 if (sc->sc_nerr++ < STGE_MAXERR)
1429 device_printf(sc->sc_dev, "Tx underrun, "
1430 "resetting...\n");
1431 if (sc->sc_nerr == STGE_MAXERR)
1432 device_printf(sc->sc_dev, "too many errors; "
1433 "not reporting any more\n");
1434 error = -1;
1435 break;
1436 }
1437		/* Maximum/Late collisions; re-enable the Tx MAC. */
1438 if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
1439 CSR_WRITE_4(sc, STGE_MACCtrl,
1440 (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
1441 MC_TxEnable);
1442 }
1443
1444 return (error);
1445}
1446
1447/*
1448 * stge_intr:
1449 *
1450 * Interrupt service routine.
1451 */
1452static void
1453stge_intr(void *arg)
1454{
1455 struct stge_softc *sc;
1456 struct ifnet *ifp;
1457 int reinit;
1458 uint16_t status;
1459
1460 sc = (struct stge_softc *)arg;
1461 ifp = sc->sc_ifp;
1462
1463 STGE_LOCK(sc);
1464
1465#ifdef DEVICE_POLLING
1466 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1467 goto done_locked;
1468#endif
1469 status = CSR_READ_2(sc, STGE_IntStatus);
1470 if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
1471 goto done_locked;
1472
1473 /* Disable interrupts. */
1474 for (reinit = 0;;) {
1475 status = CSR_READ_2(sc, STGE_IntStatusAck);
1476 status &= sc->sc_IntEnable;
1477 if (status == 0)
1478 break;
1479 /* Host interface errors. */
1480 if ((status & IS_HostError) != 0) {
1481 device_printf(sc->sc_dev,
1482 "Host interface error, resetting...\n");
1483 reinit = 1;
1484 goto force_init;
1485 }
1486
1487 /* Receive interrupts. */
1488 if ((status & IS_RxDMAComplete) != 0) {
1489 stge_rxeof(sc);
1490 if ((status & IS_RFDListEnd) != 0)
1491 CSR_WRITE_4(sc, STGE_DMACtrl,
1492 DMAC_RxDMAPollNow);
1493 }
1494
1495 /* Transmit interrupts. */
1496 if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
1497 stge_txeof(sc);
1498
1499		/* Transmission errors. */
1500 if ((status & IS_TxComplete) != 0) {
1501 if ((reinit = stge_tx_error(sc)) != 0)
1502 break;
1503 }
1504 }
1505
1506force_init:
1507 if (reinit != 0) {
1508 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1509 stge_init_locked(sc);
1510 }
1511
1512 /* Re-enable interrupts. */
1513 CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
1514
1515 /* Try to get more packets going. */
1516 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1517 stge_start_locked(ifp);
1518
1519done_locked:
1520 STGE_UNLOCK(sc);
1521}
1522
1523/*
1524 * stge_txeof:
1525 *
1526 * Helper; handle transmit interrupts.
1527 */
1528static void
1529stge_txeof(struct stge_softc *sc)
1530{
1531 struct ifnet *ifp;
1532 struct stge_txdesc *txd;
1533 uint64_t control;
1534 int cons;
1535
1536 STGE_LOCK_ASSERT(sc);
1537
1538 ifp = sc->sc_ifp;
1539
1540 txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1541 if (txd == NULL)
1542 return;
1543 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1544 sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);
1545
1546 /*
1547 * Go through our Tx list and free mbufs for those
1548 * frames which have been transmitted.
1549 */
1550 for (cons = sc->sc_cdata.stge_tx_cons;;
1551 cons = (cons + 1) % STGE_TX_RING_CNT) {
1552 if (sc->sc_cdata.stge_tx_cnt <= 0)
1553 break;
1554 control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
1555 if ((control & TFD_TFDDone) == 0)
1556 break;
1557 sc->sc_cdata.stge_tx_cnt--;
1558 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1559
1560 bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1561 BUS_DMASYNC_POSTWRITE);
1562 bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);
1563
1564		/* The output packet counter is updated from the statistics registers. */
1565 m_freem(txd->tx_m);
1566 txd->tx_m = NULL;
1567 STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
1568 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
1569 txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1570 }
1571 sc->sc_cdata.stge_tx_cons = cons;
1572 if (sc->sc_cdata.stge_tx_cnt == 0)
1573 sc->sc_watchdog_timer = 0;
1574
1575 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1576 sc->sc_cdata.stge_tx_ring_map,
1577 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1578}
1579
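/*
 * stge_discard_rxbuf:
 *
 * Recycle the Rx buffer at the given index by clearing the RFD
 * status word, handing the descriptor back to the hardware.
 */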
1580static __inline void
1581stge_discard_rxbuf(struct stge_softc *sc, int idx)
1582{
1583 struct stge_rfd *rfd;
1584
1585 rfd = &sc->sc_rdata.stge_rx_ring[idx];
1586 rfd->rfd_status = 0;
1587}
1588
1589#ifndef __NO_STRICT_ALIGNMENT
1590/*
1591 * It seems that the TC9021's DMA engine has alignment restrictions
1592 * on DMA scatter operations. The first DMA segment has no address
1593 * alignment restriction, but the rest must be aligned on a 4(?)-byte
1594 * boundary or random memory gets corrupted. Since we don't know in
1595 * advance which buffer will be used for the first segment, we simply
1596 * don't align at all.
1597 * To avoid copying an entire frame just to align it, we allocate a
1598 * new mbuf and copy only the Ethernet header into it. The new mbuf
1599 * is then prepended to the existing mbuf chain.
1600 */
1601static __inline struct mbuf *
1602stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
1603{
1604 struct mbuf *n;
1605
1606 n = NULL;
1607 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
1608 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
1609 m->m_data += ETHER_HDR_LEN;
1610 n = m;
1611 } else {
1612 MGETHDR(n, M_DONTWAIT, MT_DATA);
1613 if (n != NULL) {
1614 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
1615 m->m_data += ETHER_HDR_LEN;
1616 m->m_len -= ETHER_HDR_LEN;
1617 n->m_len = ETHER_HDR_LEN;
1618 M_MOVE_PKTHDR(n, m);
1619 n->m_next = m;
1620 } else
1621 m_freem(m);
1622 }
1623
1624 return (n);
1625}
1626#endif
1627
1628/*
1629 * stge_rxeof:
1630 *
1631 * Helper; handle receive interrupts.
1632 */
1633static int
1634stge_rxeof(struct stge_softc *sc)
1635{
1636 struct ifnet *ifp;
1637 struct stge_rxdesc *rxd;
1638 struct mbuf *mp, *m;
1639 uint64_t status64;
1640 uint32_t status;
1641 int cons, prog, rx_npkts;
1642
1643 STGE_LOCK_ASSERT(sc);
1644
1645 rx_npkts = 0;
1646 ifp = sc->sc_ifp;
1647
1648 bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1649 sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);
1650
1651 prog = 0;
1652 for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
1653 prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
1654 status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
1655 status = RFD_RxStatus(status64);
1656 if ((status & RFD_RFDDone) == 0)
1657 break;
1658#ifdef DEVICE_POLLING
1659 if (ifp->if_capenable & IFCAP_POLLING) {
1660 if (sc->sc_cdata.stge_rxcycles <= 0)
1661 break;
1662 sc->sc_cdata.stge_rxcycles--;
1663 }
1664#endif
1665 prog++;
1666 rxd = &sc->sc_cdata.stge_rxdesc[cons];
1667 mp = rxd->rx_m;
1668
1669 /*
1670 * If the packet had an error, drop it. Note we count
1671 * the error later in the periodic stats update.
1672 */
1673 if ((status & RFD_FrameEnd) != 0 && (status &
1674 (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
1675 RFD_RxAlignmentError | RFD_RxFCSError |
1676 RFD_RxLengthError)) != 0) {
1677 stge_discard_rxbuf(sc, cons);
1678 if (sc->sc_cdata.stge_rxhead != NULL) {
1679 m_freem(sc->sc_cdata.stge_rxhead);
1680 STGE_RXCHAIN_RESET(sc);
1681 }
1682 continue;
1683 }
1684 /*
1685 * Add a new receive buffer to the ring.
1686 */
1687 if (stge_newbuf(sc, cons) != 0) {
1688 ifp->if_iqdrops++;
1689 stge_discard_rxbuf(sc, cons);
1690 if (sc->sc_cdata.stge_rxhead != NULL) {
1691 m_freem(sc->sc_cdata.stge_rxhead);
1692 STGE_RXCHAIN_RESET(sc);
1693 }
1694 continue;
1695 }
1696
1697 if ((status & RFD_FrameEnd) != 0)
1698 mp->m_len = RFD_RxDMAFrameLen(status) -
1699 sc->sc_cdata.stge_rxlen;
1700 sc->sc_cdata.stge_rxlen += mp->m_len;
1701
1702 /* Chain mbufs. */
1703 if (sc->sc_cdata.stge_rxhead == NULL) {
1704 sc->sc_cdata.stge_rxhead = mp;
1705 sc->sc_cdata.stge_rxtail = mp;
1706 } else {
1707 mp->m_flags &= ~M_PKTHDR;
1708 sc->sc_cdata.stge_rxtail->m_next = mp;
1709 sc->sc_cdata.stge_rxtail = mp;
1710 }
1711
1712 if ((status & RFD_FrameEnd) != 0) {
1713 m = sc->sc_cdata.stge_rxhead;
1714 m->m_pkthdr.rcvif = ifp;
1715 m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;
1716
1717 if (m->m_pkthdr.len > sc->sc_if_framesize) {
1718 m_freem(m);
1719 STGE_RXCHAIN_RESET(sc);
1720 continue;
1721 }
1722 /*
1723 * Set the incoming checksum information for
1724 * the packet.
1725 */
1726 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1727 if ((status & RFD_IPDetected) != 0) {
1728 m->m_pkthdr.csum_flags |=
1729 CSUM_IP_CHECKED;
1730 if ((status & RFD_IPError) == 0)
1731 m->m_pkthdr.csum_flags |=
1732 CSUM_IP_VALID;
1733 }
1734 if (((status & RFD_TCPDetected) != 0 &&
1735 (status & RFD_TCPError) == 0) ||
1736 ((status & RFD_UDPDetected) != 0 &&
1737 (status & RFD_UDPError) == 0)) {
1738 m->m_pkthdr.csum_flags |=
1739 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1740 m->m_pkthdr.csum_data = 0xffff;
1741 }
1742 }
1743
1744#ifndef __NO_STRICT_ALIGNMENT
1745 if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
1746 if ((m = stge_fixup_rx(sc, m)) == NULL) {
1747 STGE_RXCHAIN_RESET(sc);
1748 continue;
1749 }
1750 }
1751#endif
1752 /* Check for VLAN tagged packets. */
1753 if ((status & RFD_VLANDetected) != 0 &&
1754 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
1755 m->m_pkthdr.ether_vtag = RFD_TCI(status64);
1756 m->m_flags |= M_VLANTAG;
1757 }
1758
1759 STGE_UNLOCK(sc);
1760 /* Pass it on. */
1761 (*ifp->if_input)(ifp, m);
1762 STGE_LOCK(sc);
1763 rx_npkts++;
1764
1765 STGE_RXCHAIN_RESET(sc);
1766 }
1767 }
1768
1769 if (prog > 0) {
1770 /* Update the consumer index. */
1771 sc->sc_cdata.stge_rx_cons = cons;
1772 bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1773 sc->sc_cdata.stge_rx_ring_map,
1774 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1775 }
1776 return (rx_npkts);
1777}
1778
1779#ifdef DEVICE_POLLING
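/*
 * stge_poll:
 *
 * Polling-mode handler; processes up to `count' Rx frames, reclaims
 * completed Tx descriptors, and handles error status when called
 * with POLL_AND_CHECK_STATUS.
 */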
1780static int
1781stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1782{
1783 struct stge_softc *sc;
1784 uint16_t status;
1785 int rx_npkts;
1786
1787 rx_npkts = 0;
1788 sc = ifp->if_softc;
1789 STGE_LOCK(sc);
1790 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1791 STGE_UNLOCK(sc);
1792 return (rx_npkts);
1793 }
1794
1795 sc->sc_cdata.stge_rxcycles = count;
1796 rx_npkts = stge_rxeof(sc);
1797 stge_txeof(sc);
1798
1799 if (cmd == POLL_AND_CHECK_STATUS) {
1800 status = CSR_READ_2(sc, STGE_IntStatus);
1801 status &= sc->sc_IntEnable;
1802 if (status != 0) {
1803 if ((status & IS_HostError) != 0) {
1804 device_printf(sc->sc_dev,
1805 "Host interface error, resetting...\n");
1806 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1807 stge_init_locked(sc);
1808 }
1809 if ((status & IS_TxComplete) != 0) {
1810 if (stge_tx_error(sc) != 0) {
1811 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1812 stge_init_locked(sc);
1813 }
1814 }
1815 }
1816
1817 }
1818
1819 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1820 stge_start_locked(ifp);
1821
1822 STGE_UNLOCK(sc);
1823 return (rx_npkts);
1824}
1825#endif /* DEVICE_POLLING */
1826
1827/*
1828 * stge_tick:
1829 *
1830 * One second timer, used to tick the MII.
1831 */
1832static void
1833stge_tick(void *arg)
1834{
1835 struct stge_softc *sc;
1836 struct mii_data *mii;
1837
1838 sc = (struct stge_softc *)arg;
1839
1840 STGE_LOCK_ASSERT(sc);
1841
1842 mii = device_get_softc(sc->sc_miibus);
1843 mii_tick(mii);
1844
1845 /* Update statistics counters. */
1846 stge_stats_update(sc);
1847
1848 /*
1849	 * Reclaim any pending Tx descriptors to release mbufs in a
1850 * timely manner as we don't generate Tx completion interrupts
1851 * for every frame. This limits the delay to a maximum of one
1852 * second.
1853 */
1854 if (sc->sc_cdata.stge_tx_cnt != 0)
1855 stge_txeof(sc);
1856
1857 stge_watchdog(sc);
1858
1859 callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
1860}
1861
1862/*
1863 * stge_stats_update:
1864 *
1865 * Read the TC9021 statistics counters.
1866 */
1867static void
1868stge_stats_update(struct stge_softc *sc)
1869{
1870 struct ifnet *ifp;
1871
1872 STGE_LOCK_ASSERT(sc);
1873
1874 ifp = sc->sc_ifp;
1875
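	/* The octet counters are read only to clear them; values unused. */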
1876	CSR_READ_4(sc, STGE_OctetRcvOk);
1877
1878 ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);
1879
1880 ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);
1881
1882 CSR_READ_4(sc, STGE_OctetXmtdOk);
1883
1884 ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);
1885
1886 ifp->if_collisions +=
1887 CSR_READ_4(sc, STGE_LateCollisions) +
1888 CSR_READ_4(sc, STGE_MultiColFrames) +
1889 CSR_READ_4(sc, STGE_SingleColFrames);
1890
1891 ifp->if_oerrors +=
1892 CSR_READ_2(sc, STGE_FramesAbortXSColls) +
1893 CSR_READ_2(sc, STGE_FramesWEXDeferal);
1894}
1895
1896/*
1897 * stge_reset:
1898 *
1899 * Perform a soft reset on the TC9021.
1900 */
1901static void
1902stge_reset(struct stge_softc *sc, uint32_t how)
1903{
1904 uint32_t ac;
1905 uint8_t v;
1906 int i, dv;
1907
1908 STGE_LOCK_ASSERT(sc);
1909
1910 dv = 5000;
1911 ac = CSR_READ_4(sc, STGE_AsicCtrl);
1912 switch (how) {
1913 case STGE_RESET_TX:
1914 ac |= AC_TxReset | AC_FIFO;
1915 dv = 100;
1916 break;
1917 case STGE_RESET_RX:
1918 ac |= AC_RxReset | AC_FIFO;
1919 dv = 100;
1920 break;
1921 case STGE_RESET_FULL:
1922 default:
1923 /*
1924 * Only assert RstOut if we're fiber. We need GMII clocks
1925 * to be present in order for the reset to complete on fiber
1926 * cards.
1927 */
1928 ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
1929 AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
1930 (sc->sc_usefiber ? AC_RstOut : 0);
1931 break;
1932 }
1933
1934 CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1935
1936 /* Account for reset problem at 10Mbps. */
1937 DELAY(dv);
1938
1939 for (i = 0; i < STGE_TIMEOUT; i++) {
1940 if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1941 break;
1942 DELAY(dv);
1943 }
1944
1945 if (i == STGE_TIMEOUT)
1946 device_printf(sc->sc_dev, "reset failed to complete\n");
1947
1948 /* Set LED, from Linux IPG driver. */
1949 ac = CSR_READ_4(sc, STGE_AsicCtrl);
1950 ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
1951 if ((sc->sc_led & 0x01) != 0)
1952 ac |= AC_LEDMode;
1953 if ((sc->sc_led & 0x03) != 0)
1954 ac |= AC_LEDModeBit1;
1955 if ((sc->sc_led & 0x08) != 0)
1956 ac |= AC_LEDSpeed;
1957 CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1958
1959	/* Set PHY, from Linux IPG driver. */
1960 v = CSR_READ_1(sc, STGE_PhySet);
1961 v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
1962 v |= ((sc->sc_led & 0x70) >> 4);
1963 CSR_WRITE_1(sc, STGE_PhySet, v);
1964}
1965
1966/*
1967 * stge_init: [ ifnet interface function ]
1968 *
1969 * Initialize the interface.
1970 */
1971static void
1972stge_init(void *xsc)
1973{
1974 struct stge_softc *sc;
1975
1976 sc = (struct stge_softc *)xsc;
1977 STGE_LOCK(sc);
1978 stge_init_locked(sc);
1979 STGE_UNLOCK(sc);
1980}
1981
1982static void
1983stge_init_locked(struct stge_softc *sc)
1984{
1985 struct ifnet *ifp;
1986 struct mii_data *mii;
1987 uint16_t eaddr[3];
1988 uint32_t v;
1989 int error;
1990
1991 STGE_LOCK_ASSERT(sc);
1992
1993 ifp = sc->sc_ifp;
1994 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1995 return;
1996 mii = device_get_softc(sc->sc_miibus);
1997
1998 /*
1999 * Cancel any pending I/O.
2000 */
2001 stge_stop(sc);
2002
2003 /*
2004 * Reset the chip to a known state.
2005 */
2006 stge_reset(sc, STGE_RESET_FULL);
2007
2008 /* Init descriptors. */
2009 error = stge_init_rx_ring(sc);
2010 if (error != 0) {
2011 device_printf(sc->sc_dev,
2012 "initialization failed: no memory for rx buffers\n");
2013 stge_stop(sc);
2014 goto out;
2015 }
2016 stge_init_tx_ring(sc);
2017
2018 /* Set the station address. */
2019 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2020 CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
2021 CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
2022 CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));
2023
2024 /*
2025 * Set the statistics masks. Disable all the RMON stats,
2026 * and disable selected stats in the non-RMON stats registers.
2027 */
2028 CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
2029 CSR_WRITE_4(sc, STGE_StatisticsMask,
2030 (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
2031 (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
2032 (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
2033 (1U << 21));
2034
2035 /* Set up the receive filter. */
2036 stge_set_filter(sc);
2037 /* Program multicast filter. */
2038 stge_set_multi(sc);
2039
2040 /*
2041 * Give the transmit and receive ring to the chip.
2042 */
2043 CSR_WRITE_4(sc, STGE_TFDListPtrHi,
2044 STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
2045 CSR_WRITE_4(sc, STGE_TFDListPtrLo,
2046 STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));
2047
2048 CSR_WRITE_4(sc, STGE_RFDListPtrHi,
2049 STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
2050 CSR_WRITE_4(sc, STGE_RFDListPtrLo,
2051 STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));
2052
2053 /*
2054 * Initialize the Tx auto-poll period. It's OK to make this number
2055 * large (255 is the max, but we use 127) -- we explicitly kick the
2056 * transmit engine when there's actually a packet.
2057 */
2058 CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2059
2060	/* ...and the Rx auto-poll period. */
2061 CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2062
2063 /* Initialize the Tx start threshold. */
2064 CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
2065
2066 /* Rx DMA thresholds, from Linux */
2067 CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
2068 CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);
2069
2070	/* Rx early threshold, from Linux */
2071 CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);
2072
2073 /* Tx DMA thresholds, from Linux */
2074 CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
2075 CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);
2076
2077 /*
2078 * Initialize the Rx DMA interrupt control register. We
2079 * request an interrupt after every incoming packet, but
2080 * defer it for sc_rxint_dmawait us. When the number of
2081 * interrupts pending reaches STGE_RXINT_NFRAME, we stop
2082 * deferring the interrupt, and signal it immediately.
2083 */
2084 CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
2085 RDIC_RxFrameCount(sc->sc_rxint_nframe) |
2086 RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
2087
2088 /*
2089 * Initialize the interrupt mask.
2090 */
2091 sc->sc_IntEnable = IS_HostError | IS_TxComplete |
2092 IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
2093#ifdef DEVICE_POLLING
2094 /* Disable interrupts if we are polling. */
2095 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2096 CSR_WRITE_2(sc, STGE_IntEnable, 0);
2097 else
2098#endif
2099 CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
2100
2101 /*
2102 * Configure the DMA engine.
2103 * XXX Should auto-tune TxBurstLimit.
2104 */
2105 CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));
2106
2107 /*
2108 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
2109 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
2110 * in the Rx FIFO.
2111 */
2112 CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
2113 CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
2114
2115 /*
2116 * Set the maximum frame size.
2117 */
2118 sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2119 CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);
2120
2121 /*
2122	 * Initialize MACCtrl -- do it before setting the media,
2123 * as setting the media will actually program the register.
2124 *
2125 * Note: We have to poke the IFS value before poking
2126 * anything else.
2127 */
2128	/* Tx/Rx MAC should be disabled before programming IFS. */
2129 CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));
2130
2131 stge_vlan_setup(sc);
2132
2133 if (sc->sc_rev >= 6) { /* >= B.2 */
2134 /* Multi-frag frame bug work-around. */
2135 CSR_WRITE_2(sc, STGE_DebugCtrl,
2136 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);
2137
2138 /* Tx Poll Now bug work-around. */
2139 CSR_WRITE_2(sc, STGE_DebugCtrl,
2140 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
2141 /* Tx Poll Now bug work-around. */
2142 CSR_WRITE_2(sc, STGE_DebugCtrl,
2143 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
2144 }
2145
2146 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2147 v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
2148 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2149 /*
2150 * It seems that transmitting frames without checking the state of
2151	 * Rx/Tx MAC wedges the hardware.
2152 */
2153 stge_start_tx(sc);
2154 stge_start_rx(sc);
2155
2156 sc->sc_link = 0;
2157 /*
2158 * Set the current media.
2159 */
2160 mii_mediachg(mii);
2161
2162 /*
2163 * Start the one second MII clock.
2164 */
2165 callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
2166
2167 /*
2168 * ...all done!
2169 */
2170 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2171 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2172
2173 out:
2174 if (error != 0)
2175 device_printf(sc->sc_dev, "interface not running\n");
2176}
2177
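/*
 * stge_vlan_setup:
 *
 * Enable or disable automatic VLAN untagging to match the
 * IFCAP_VLAN_HWTAGGING capability; tag insertion is requested
 * per-frame through the TFC.
 */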
2178static void
2179stge_vlan_setup(struct stge_softc *sc)
2180{
2181 struct ifnet *ifp;
2182 uint32_t v;
2183
2184 ifp = sc->sc_ifp;
2185 /*
2186	 * The NIC always copies a VLAN tag regardless of the STGE_MACCtrl
2187	 * MC_AutoVLANuntagging bit.
2188	 * The MC_AutoVLANtagging bit selects which VLAN source to use,
2189	 * STGE_VLANTag or the TFC. However, the TFC TFD_VLANTagInsert bit
2190	 * has priority over MC_AutoVLANtagging, so we always use the TFC
2191	 * instead of the STGE_VLANTag register.
2192 */
2193 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2194 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2195 v |= MC_AutoVLANuntagging;
2196 else
2197 v &= ~MC_AutoVLANuntagging;
2198 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2199}
2200
2201/*
2202 * Stop transmission on the interface.
2203 */
2204static void
2205stge_stop(struct stge_softc *sc)
2206{
2207 struct ifnet *ifp;
2208 struct stge_txdesc *txd;
2209 struct stge_rxdesc *rxd;
2210 uint32_t v;
2211 int i;
2212
2213 STGE_LOCK_ASSERT(sc);
2214 /*
2215 * Stop the one second clock.
2216 */
2217 callout_stop(&sc->sc_tick_ch);
2218 sc->sc_watchdog_timer = 0;
2219
2220 /*
2221 * Disable interrupts.
2222 */
2223 CSR_WRITE_2(sc, STGE_IntEnable, 0);
2224
2225 /*
2226 * Stop receiver, transmitter, and stats update.
2227 */
2228 stge_stop_rx(sc);
2229 stge_stop_tx(sc);
2230 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2231 v |= MC_StatisticsDisable;
2232 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2233
2234 /*
2235 * Stop the transmit and receive DMA.
2236 */
2237 stge_dma_wait(sc);
2238 CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
2239 CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
2240 CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
2241 CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
2242
2243 /*
2244	 * Free Rx and Tx mbufs still in the queues.
2245 */
2246 for (i = 0; i < STGE_RX_RING_CNT; i++) {
2247 rxd = &sc->sc_cdata.stge_rxdesc[i];
2248 if (rxd->rx_m != NULL) {
2249 bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
2250 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2251 bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
2252 rxd->rx_dmamap);
2253 m_freem(rxd->rx_m);
2254 rxd->rx_m = NULL;
2255 }
2256 }
2257 for (i = 0; i < STGE_TX_RING_CNT; i++) {
2258 txd = &sc->sc_cdata.stge_txdesc[i];
2259 if (txd->tx_m != NULL) {
2260 bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
2261 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2262 bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
2263 txd->tx_dmamap);
2264 m_freem(txd->tx_m);
2265 txd->tx_m = NULL;
2266 }
2267 }
2268
2269 /*
2270	 * Mark the interface down and clear the link state.
2271 */
2272 ifp = sc->sc_ifp;
2273 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2274 sc->sc_link = 0;
2275}
2276
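/*
 * stge_start_tx / stge_start_rx / stge_stop_tx / stge_stop_rx:
 *
 * Enable or disable the Tx/Rx MAC and poll STGE_MACCtrl until the
 * state change takes effect or STGE_TIMEOUT iterations elapse.
 */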
2277static void
2278stge_start_tx(struct stge_softc *sc)
2279{
2280 uint32_t v;
2281 int i;
2282
2283 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2284 if ((v & MC_TxEnabled) != 0)
2285 return;
2286 v |= MC_TxEnable;
2287 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2288 CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2289 for (i = STGE_TIMEOUT; i > 0; i--) {
2290 DELAY(10);
2291 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2292 if ((v & MC_TxEnabled) != 0)
2293 break;
2294 }
2295 if (i == 0)
2296 device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
2297}
2298
2299static void
2300stge_start_rx(struct stge_softc *sc)
2301{
2302 uint32_t v;
2303 int i;
2304
2305 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2306 if ((v & MC_RxEnabled) != 0)
2307 return;
2308 v |= MC_RxEnable;
2309 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2310 CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2311 for (i = STGE_TIMEOUT; i > 0; i--) {
2312 DELAY(10);
2313 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2314 if ((v & MC_RxEnabled) != 0)
2315 break;
2316 }
2317 if (i == 0)
2318 device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
2319}
2320
2321static void
2322stge_stop_tx(struct stge_softc *sc)
2323{
2324 uint32_t v;
2325 int i;
2326
2327 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2328 if ((v & MC_TxEnabled) == 0)
2329 return;
2330 v |= MC_TxDisable;
2331 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2332 for (i = STGE_TIMEOUT; i > 0; i--) {
2333 DELAY(10);
2334 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2335 if ((v & MC_TxEnabled) == 0)
2336 break;
2337 }
2338 if (i == 0)
2339 device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
2340}
2341
2342static void
2343stge_stop_rx(struct stge_softc *sc)
2344{
2345 uint32_t v;
2346 int i;
2347
2348 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2349 if ((v & MC_RxEnabled) == 0)
2350 return;
2351 v |= MC_RxDisable;
2352 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2353 for (i = STGE_TIMEOUT; i > 0; i--) {
2354 DELAY(10);
2355 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2356 if ((v & MC_RxEnabled) == 0)
2357 break;
2358 }
2359 if (i == 0)
2360 device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
2361}
2362
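/*
 * stge_init_tx_ring:
 *
 * Link the transmit descriptors into a circular list, mark each
 * TFD done, and place all descriptors on the free queue.
 */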
2363static void
2364stge_init_tx_ring(struct stge_softc *sc)
2365{
2366 struct stge_ring_data *rd;
2367 struct stge_txdesc *txd;
2368 bus_addr_t addr;
2369 int i;
2370
2371 STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
2372 STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
2373
2374 sc->sc_cdata.stge_tx_prod = 0;
2375 sc->sc_cdata.stge_tx_cons = 0;
2376 sc->sc_cdata.stge_tx_cnt = 0;
2377
2378 rd = &sc->sc_rdata;
2379 bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
2380 for (i = 0; i < STGE_TX_RING_CNT; i++) {
2381 if (i == (STGE_TX_RING_CNT - 1))
2382 addr = STGE_TX_RING_ADDR(sc, 0);
2383 else
2384 addr = STGE_TX_RING_ADDR(sc, i + 1);
2385 rd->stge_tx_ring[i].tfd_next = htole64(addr);
2386 rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
2387 txd = &sc->sc_cdata.stge_txdesc[i];
2388 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
2389 }
2390
2391 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
2392 sc->sc_cdata.stge_tx_ring_map,
2393 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2394
2395}
2396
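/*
 * stge_init_rx_ring:
 *
 * Link the receive descriptors into a circular list, loading a
 * fresh mbuf cluster into each RFD; returns ENOBUFS on failure.
 */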
2397static int
2398stge_init_rx_ring(struct stge_softc *sc)
2399{
2400 struct stge_ring_data *rd;
2401 bus_addr_t addr;
2402 int i;
2403
2404 sc->sc_cdata.stge_rx_cons = 0;
2405 STGE_RXCHAIN_RESET(sc);
2406
2407 rd = &sc->sc_rdata;
2408 bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
2409 for (i = 0; i < STGE_RX_RING_CNT; i++) {
2410 if (stge_newbuf(sc, i) != 0)
2411 return (ENOBUFS);
2412 if (i == (STGE_RX_RING_CNT - 1))
2413 addr = STGE_RX_RING_ADDR(sc, 0);
2414 else
2415 addr = STGE_RX_RING_ADDR(sc, i + 1);
2416 rd->stge_rx_ring[i].rfd_next = htole64(addr);
2417 rd->stge_rx_ring[i].rfd_status = 0;
2418 }
2419
2420 bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
2421 sc->sc_cdata.stge_rx_ring_map,
2422 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2423
2424 return (0);
2425}
2426
2427/*
2428 * stge_newbuf:
2429 *
2430 * Add a receive buffer to the indicated descriptor.
2431 */
2432static int
2433stge_newbuf(struct stge_softc *sc, int idx)
2434{
2435 struct stge_rxdesc *rxd;
2436 struct stge_rfd *rfd;
2437 struct mbuf *m;
2438 bus_dma_segment_t segs[1];
2439 bus_dmamap_t map;
2440 int nsegs;
2441
2442 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2443 if (m == NULL)
2444 return (ENOBUFS);
2445 m->m_len = m->m_pkthdr.len = MCLBYTES;
2446 /*
2447	 * The hardware requires a 4-byte aligned DMA address when a
2448	 * jumbo frame is used.
2449 */
2450 if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
2451 m_adj(m, ETHER_ALIGN);
2452
2453 if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
2454 sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2455 m_freem(m);
2456 return (ENOBUFS);
2457 }
2458 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2459
2460 rxd = &sc->sc_cdata.stge_rxdesc[idx];
2461 if (rxd->rx_m != NULL) {
2462 bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2463 BUS_DMASYNC_POSTREAD);
2464 bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
2465 }
2466 map = rxd->rx_dmamap;
2467 rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
2468 sc->sc_cdata.stge_rx_sparemap = map;
2469 bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2470 BUS_DMASYNC_PREREAD);
2471 rxd->rx_m = m;
2472
2473 rfd = &sc->sc_rdata.stge_rx_ring[idx];
2474 rfd->rfd_frag.frag_word0 =
2475 htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
2476 rfd->rfd_status = 0;
2477
2478 return (0);
2479}
2480
2481/*
2482 * stge_set_filter:
2483 *
2484 * Set up the receive filter.
2485 */
2486static void
2487stge_set_filter(struct stge_softc *sc)
2488{
2489 struct ifnet *ifp;
2490 uint16_t mode;
2491
2492 STGE_LOCK_ASSERT(sc);
2493
2494 ifp = sc->sc_ifp;
2495
2496 mode = CSR_READ_2(sc, STGE_ReceiveMode);
2497 mode |= RM_ReceiveUnicast;
2498 if ((ifp->if_flags & IFF_BROADCAST) != 0)
2499 mode |= RM_ReceiveBroadcast;
2500 else
2501 mode &= ~RM_ReceiveBroadcast;
2502 if ((ifp->if_flags & IFF_PROMISC) != 0)
2503 mode |= RM_ReceiveAllFrames;
2504 else
2505 mode &= ~RM_ReceiveAllFrames;
2506
2507 CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2508}
2509
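/*
 * stge_set_multi:
 *
 * Program the 64-bit multicast hash filter, or fall back to
 * receive-all/receive-multicast for promiscuous/allmulti mode.
 */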
2510static void
2511stge_set_multi(struct stge_softc *sc)
2512{
2513 struct ifnet *ifp;
2514 struct ifmultiaddr *ifma;
2515 uint32_t crc;
2516 uint32_t mchash[2];
2517 uint16_t mode;
2518 int count;
2519
2520 STGE_LOCK_ASSERT(sc);
2521
2522 ifp = sc->sc_ifp;
2523
2524 mode = CSR_READ_2(sc, STGE_ReceiveMode);
2525 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2526 if ((ifp->if_flags & IFF_PROMISC) != 0)
2527 mode |= RM_ReceiveAllFrames;
2528 else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2529 mode |= RM_ReceiveMulticast;
2530 CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2531 return;
2532 }
2533
2534	/* Clear the existing hash filters. */
2535 CSR_WRITE_4(sc, STGE_HashTable0, 0);
2536 CSR_WRITE_4(sc, STGE_HashTable1, 0);
2537
2538 /*
2539 * Set up the multicast address filter by passing all multicast
2540 * addresses through a CRC generator, and then using the low-order
2541	 * 6 bits as an index into the 64-bit multicast hash table. The
2542 * high order bits select the register, while the rest of the bits
2543 * select the bit within the register.
2544 */
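	/*
	 * For example, a CRC whose low 6 bits are 43 (0x2b) sets bit
	 * 11 (43 & 0x1f) of mchash[1] (43 >> 5), i.e. STGE_HashTable1.
	 */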
2545
2546 bzero(mchash, sizeof(mchash));
2547
2548 count = 0;
2549	if_maddr_rlock(ifp);
2550	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2551 if (ifma->ifma_addr->sa_family != AF_LINK)
2552 continue;
2553 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2554 ifma->ifma_addr), ETHER_ADDR_LEN);
2555
2556 /* Just want the 6 least significant bits. */
2557 crc &= 0x3f;
2558
2559 /* Set the corresponding bit in the hash table. */
2560 mchash[crc >> 5] |= 1 << (crc & 0x1f);
2561 count++;
2562 }
2563 if_maddr_runlock(ifp);
2564
2565 mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
2566 if (count > 0)
2567 mode |= RM_ReceiveMulticastHash;
2568 else
2569 mode &= ~RM_ReceiveMulticastHash;
2570
2571 CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
2572 CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
2573 CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2574}
2575
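/*
 * sysctl_int_range:
 *
 * Common sysctl handler that accepts a new integer value only if
 * it lies within [low, high]; used by the rxint tunables below.
 */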
2576static int
2577sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2578{
2579 int error, value;
2580
2581 if (!arg1)
2582 return (EINVAL);
2583 value = *(int *)arg1;
2584 error = sysctl_handle_int(oidp, &value, 0, req);
2585 if (error || !req->newptr)
2586 return (error);
2587 if (value < low || value > high)
2588 return (EINVAL);
2589 *(int *)arg1 = value;
2590
2591 return (0);
2592}
2593
2594static int
2595sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
2596{
2597 return (sysctl_int_range(oidp, arg1, arg2, req,
2598 STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
2599}
2600
2601static int
2602sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
2603{
2604 return (sysctl_int_range(oidp, arg1, arg2, req,
2605 STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
2606}