/*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Sundance Tech. TC9021 10/100/1000
 * Ethernet controller.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/stge/if_stge.c 215297 2010-11-14 13:26:10Z marius $");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/stge/if_stgereg.h>

#define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(stge, pci, 1, 1, 1);
MODULE_DEPEND(stge, ether, 1, 1, 1);
MODULE_DEPEND(stge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Devices supported by this driver.
 */
static struct stge_product {
	uint16_t	stge_vendorid;
	uint16_t	stge_deviceid;
	const char	*stge_name;
} stge_products[] = {
	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
	  "Sundance ST-1023 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
	  "Sundance ST-2021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
	  "Tamarack TC9021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Tamarack TC9021 Gigabit Ethernet" },

	/*
	 * The Sundance sample boards use the Sundance vendor ID,
	 * but the Tamarack product ID.
	 */
	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_DLINK,		DEVICEID_DLINK_DL4000,
	  "D-Link DL-4000 Gigabit Ethernet" },

	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
	  "Antares Gigabit Ethernet" }
};

static int	stge_probe(device_t);
static int	stge_attach(device_t);
static int	stge_detach(device_t);
static int	stge_shutdown(device_t);
static int	stge_suspend(device_t);
static int	stge_resume(device_t);

static int	stge_encap(struct stge_softc *, struct mbuf **);
static void	stge_start(struct ifnet *);
static void	stge_start_locked(struct ifnet *);
static void	stge_watchdog(struct stge_softc *);
static int	stge_ioctl(struct ifnet *, u_long, caddr_t);
static void	stge_init(void *);
static void	stge_init_locked(struct stge_softc *);
static void	stge_vlan_setup(struct stge_softc *);
static void	stge_stop(struct stge_softc *);
static void	stge_start_tx(struct stge_softc *);
static void	stge_start_rx(struct stge_softc *);
static void	stge_stop_tx(struct stge_softc *);
static void	stge_stop_rx(struct stge_softc *);

static void	stge_reset(struct stge_softc *, uint32_t);
static int	stge_eeprom_wait(struct stge_softc *);
static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
static void	stge_tick(void *);
static void	stge_stats_update(struct stge_softc *);
static void	stge_set_filter(struct stge_softc *);
static void	stge_set_multi(struct stge_softc *);

static void	stge_link_task(void *, int);
static void	stge_intr(void *);
static __inline int stge_tx_error(struct stge_softc *);
static void	stge_txeof(struct stge_softc *);
static int	stge_rxeof(struct stge_softc *);
static __inline void stge_discard_rxbuf(struct stge_softc *, int);
static int	stge_newbuf(struct stge_softc *, int);
#ifndef __NO_STRICT_ALIGNMENT
static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
#endif

static void	stge_mii_sync(struct stge_softc *);
static void	stge_mii_send(struct stge_softc *, uint32_t, int);
static int	stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *);
static int	stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *);
static int	stge_miibus_readreg(device_t, int, int);
static int	stge_miibus_writereg(device_t, int, int, int);
static void	stge_miibus_statchg(device_t);
static int	stge_mediachange(struct ifnet *);
static void	stge_mediastatus(struct ifnet *, struct ifmediareq *);

static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	stge_dma_alloc(struct stge_softc *);
static void	stge_dma_free(struct stge_softc *);
static void	stge_dma_wait(struct stge_softc *);
static void	stge_init_tx_ring(struct stge_softc *);
static int	stge_init_rx_ring(struct stge_softc *);
#ifdef DEVICE_POLLING
static int	stge_poll(struct ifnet *, enum poll_cmd, int);
#endif

static void	stge_setwol(struct stge_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);

static device_method_t stge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		stge_probe),
	DEVMETHOD(device_attach,	stge_attach),
	DEVMETHOD(device_detach,	stge_detach),
	DEVMETHOD(device_shutdown,	stge_shutdown),
	DEVMETHOD(device_suspend,	stge_suspend),
	DEVMETHOD(device_resume,	stge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),

	{ 0, 0 }

};

static driver_t stge_driver = {
	"stge",
	stge_methods,
	sizeof(struct stge_softc)
};

static devclass_t stge_devclass;

DRIVER_MODULE(stge, pci, stge_driver, stge_devclass, 0, 0);
DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec stge_res_spec_io[] = {
	{ SYS_RES_IOPORT,	PCIR_BAR(0),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec stge_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(1),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

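/*
 * MII_SET/MII_CLR bit-bang the MII management interface through the
 * PhyCtrl register; the stge_mii_* helpers below use them to drive the
 * IEEE 802.3 clause 22 read/write frame protocol in software.
 */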
#define	MII_SET(x)	\
	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x))
#define	MII_CLR(x)	\
	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x))

/*
 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
 */
static void
stge_mii_sync(struct stge_softc *sc)
{
	int i;

	MII_SET(PC_MgmtDir | PC_MgmtData);

	for (i = 0; i < 32; i++) {
		MII_SET(PC_MgmtClk);
		DELAY(1);
		MII_CLR(PC_MgmtClk);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
static void
stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt)
{
	int i;

	MII_CLR(PC_MgmtClk);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i)
			MII_SET(PC_MgmtData);
		else
			MII_CLR(PC_MgmtData);
		DELAY(1);
		MII_CLR(PC_MgmtClk);
		DELAY(1);
		MII_SET(PC_MgmtClk);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = STGE_MII_STARTDELIM;
	frame->mii_opcode = STGE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(PC_MgmtDir);

	stge_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	stge_mii_send(sc, frame->mii_stdelim, 2);
	stge_mii_send(sc, frame->mii_opcode, 2);
	stge_mii_send(sc, frame->mii_phyaddr, 5);
	stge_mii_send(sc, frame->mii_regaddr, 5);

	/* Turn off xmit. */
	MII_CLR(PC_MgmtDir);

	/* Idle bit */
	MII_CLR((PC_MgmtClk | PC_MgmtData));
	DELAY(1);
	MII_SET(PC_MgmtClk);
	DELAY(1);

	/* Check for ack */
	MII_CLR(PC_MgmtClk);
	DELAY(1);
	ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData;
	MII_SET(PC_MgmtClk);
	DELAY(1);

	/*
	 * Now try reading data bits.  If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			MII_CLR(PC_MgmtClk);
			DELAY(1);
			MII_SET(PC_MgmtClk);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(PC_MgmtClk);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(PC_MgmtClk);
		DELAY(1);
	}

fail:
	MII_CLR(PC_MgmtClk);
	DELAY(1);
	MII_SET(PC_MgmtClk);
	DELAY(1);

	if (ack)
		return (1);
	return (0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame)
{

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = STGE_MII_STARTDELIM;
	frame->mii_opcode = STGE_MII_WRITEOP;
	frame->mii_turnaround = STGE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(PC_MgmtDir);

	stge_mii_sync(sc);

	stge_mii_send(sc, frame->mii_stdelim, 2);
	stge_mii_send(sc, frame->mii_opcode, 2);
	stge_mii_send(sc, frame->mii_phyaddr, 5);
	stge_mii_send(sc, frame->mii_regaddr, 5);
	stge_mii_send(sc, frame->mii_turnaround, 2);
	stge_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(PC_MgmtClk);
	DELAY(1);
	MII_CLR(PC_MgmtClk);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(PC_MgmtDir);

	return (0);
}

/*
 * stge_miibus_readreg:	[mii interface function]
 *
 *	Read a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct stge_softc *sc;
	struct stge_mii_frame frame;
	int error;

	sc = device_get_softc(dev);

	if (reg == STGE_PhyCtrl) {
		/* XXX allow ip1000phy to read the STGE_PhyCtrl register. */
		STGE_MII_LOCK(sc);
		error = CSR_READ_1(sc, STGE_PhyCtrl);
		STGE_MII_UNLOCK(sc);
		return (error);
	}
	bzero(&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;

	STGE_MII_LOCK(sc);
	error = stge_mii_readreg(sc, &frame);
	STGE_MII_UNLOCK(sc);

	if (error != 0) {
		/* Don't show errors for PHY probe request */
		if (reg != 1)
			device_printf(sc->sc_dev, "phy read fail\n");
		return (0);
	}
	return (frame.mii_data);
}

/*
 * stge_miibus_writereg:	[mii interface function]
 *
 *	Write a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct stge_softc *sc;
	struct stge_mii_frame frame;
	int error;

	sc = device_get_softc(dev);

	bzero(&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = val;

	STGE_MII_LOCK(sc);
	error = stge_mii_writereg(sc, &frame);
	STGE_MII_UNLOCK(sc);

	if (error != 0)
		device_printf(sc->sc_dev, "phy write fail\n");
	return (0);
}

/*
 * stge_miibus_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
stge_miibus_statchg(device_t dev)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);
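	/* Defer the actual MAC reprogramming to stge_link_task(). */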
	taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task);
}

/*
 * stge_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status.
 */
static void
stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * stge_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media.
 */
static int
stge_mediachange(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sc_miibus);
	mii_mediachg(mii);

	return (0);
}

static int
stge_eeprom_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(1000);
		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
			return (0);
	}
	return (1);
}

/*
 * stge_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
static void
stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
{

	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");

	CSR_WRITE_2(sc, STGE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM read timed out\n");
	*data = CSR_READ_2(sc, STGE_EepromData);
}


static int
stge_probe(device_t dev)
{
	struct stge_product *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = stge_products;
	for (i = 0; i < sizeof(stge_products)/sizeof(stge_products[0]);
	    i++, sp++) {
		if (vendor == sp->stge_vendorid &&
		    devid == sp->stge_deviceid) {
			device_set_desc(dev, sp->stge_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
stge_attach(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error, flags, i;
	uint16_t cmd;
	uint32_t val;

	error = 0;
	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc);

	/*
	 * Map the device.
	 */
	pci_enable_busmaster(dev);
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	val = pci_read_config(dev, PCIR_BAR(1), 4);
	if ((val & 0x01) != 0)
		sc->sc_spec = stge_res_spec_mem;
	else {
		val = pci_read_config(dev, PCIR_BAR(0), 4);
		if ((val & 0x01) == 0) {
			device_printf(sc->sc_dev, "couldn't locate IO BAR\n");
			error = ENXIO;
			goto fail;
		}
		sc->sc_spec = stge_res_spec_io;
	}
	error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res);
	if (error != 0) {
		device_printf(dev, "couldn't allocate %s resources\n",
		    sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O");
		goto fail;
	}
	sc->sc_rev = pci_get_revid(dev);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
	    sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
	    sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");

	/* Pull in device tunables. */
	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "rxint_nframe", &sc->sc_rxint_nframe);
	if (error == 0) {
		if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN ||
		    sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) {
			device_printf(dev, "rxint_nframe value out of range; "
			    "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT);
			sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
		}
	}

	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "rxint_dmawait", &sc->sc_rxint_dmawait);
	if (error == 0) {
		if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN ||
		    sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) {
			device_printf(dev, "rxint_dmawait value out of range; "
			    "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT);
			sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
		}
	}

	if ((error = stge_dma_alloc(sc)) != 0)
		goto fail;

	/*
	 * Determine if we're copper or fiber.  It affects how we
	 * reset the card.
	 */
	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
		sc->sc_usefiber = 1;
	else
		sc->sc_usefiber = 0;

	/* Load LED configuration from EEPROM. */
	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);

	/*
	 * Reset the chip to a known state.
	 */
	STGE_LOCK(sc);
	stge_reset(sc, STGE_RESET_FULL);
	STGE_UNLOCK(sc);

	/*
	 * Reading the station address from the EEPROM doesn't seem
	 * to work, at least on my sample boards.  Instead, since
	 * the reset sequence does AutoInit, read it from the station
	 * address registers.  For the Sundance ST-1023 it can only be
	 * read from the EEPROM.
	 */
	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
		uint16_t v;

		v = CSR_READ_2(sc, STGE_StationAddress0);
		enaddr[0] = v & 0xff;
		enaddr[1] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress1);
		enaddr[2] = v & 0xff;
		enaddr[3] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress2);
		enaddr[4] = v & 0xff;
		enaddr[5] = v >> 8;
		sc->sc_stge1023 = 0;
	} else {
		uint16_t myaddr[ETHER_ADDR_LEN / 2];
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
			    &myaddr[i]);
			myaddr[i] = le16toh(myaddr[i]);
		}
		bcopy(myaddr, enaddr, sizeof(enaddr));
		sc->sc_stge1023 = 1;
	}

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "failed to if_alloc()\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = stge_ioctl;
	ifp->if_start = stge_start;
	ifp->if_init = stge_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	/* Revision B3 and earlier chips have a checksum bug. */
	if (sc->sc_rev >= 0x0c) {
		ifp->if_hwassist = STGE_CSUM_FEATURES;
		ifp->if_capabilities = IFCAP_HWCSUM;
	} else {
		ifp->if_hwassist = 0;
		ifp->if_capabilities = 0;
	}
	ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Read some important bits from the PhyCtrl register.
	 */
	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);

	/* Set up MII bus. */
	flags = MIIF_DOPAUSE;
	if (sc->sc_rev >= 0x40 && sc->sc_rev <= 0x4e)
		flags |= MIIF_MACPRIV0;
	error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, stge_mediachange,
	    stge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    flags);
	if (error != 0) {
		device_printf(sc->sc_dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, enaddr);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	if (sc->sc_rev >= 0x0c)
		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * The manual recommends disabling early transmit, so we
	 * do.  It's disabled anyway when using IP checksumming,
	 * since the entire packet must be in the FIFO in order
	 * for the chip to perform the checksum.
	 */
	sc->sc_txthresh = 0x0fff;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((cmd & PCIM_CMD_MWRICEN) == 0)
		sc->sc_DMACtrl |= DMAC_MWIDisable;

	/*
	 * Hook up the IRQ.
	 */
	error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, stge_intr, sc, &sc->sc_ih);
	if (error != 0) {
		ether_ifdetach(ifp);
		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
		sc->sc_ifp = NULL;
		goto fail;
	}

fail:
	if (error != 0)
		stge_detach(dev);

	return (error);
}

static int
stge_detach(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->sc_ifp;
#ifdef DEVICE_POLLING
	if (ifp && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		STGE_LOCK(sc);
		/* XXX */
		sc->sc_detach = 1;
		stge_stop(sc);
		STGE_UNLOCK(sc);
		callout_drain(&sc->sc_tick_ch);
		taskqueue_drain(taskqueue_swi, &sc->sc_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL) {
		device_delete_child(dev, sc->sc_miibus);
		sc->sc_miibus = NULL;
	}
	bus_generic_detach(dev);
	stge_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->sc_ifp = NULL;
	}

	if (sc->sc_ih) {
		bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih);
		sc->sc_ih = NULL;
	}
	bus_release_resources(dev, sc->sc_spec, sc->sc_res);

	mtx_destroy(&sc->sc_mii_mtx);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

struct stge_dmamap_arg {
	bus_addr_t	stge_busaddr;
};

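/* bus_dmamap_load() callback; records the bus address of the single segment. */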
static void
stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct stge_dmamap_arg *ctx;

	if (error != 0)
		return;

	ctx = (struct stge_dmamap_arg *)arg;
	ctx->stge_busaddr = segs[0].ds_addr;
}

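/*
 * Build the DMA tag hierarchy: a parent tag bounded by STGE_DMA_MAXADDR,
 * child tags for the Tx/Rx descriptor rings (kept below 4GB), and
 * per-buffer tags for the Tx/Rx mbufs.
 */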
static int
stge_dma_alloc(struct stge_softc *sc)
{
	struct stge_dmamap_arg ctx;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int error, i;

	/* create parent tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    STGE_DMA_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* create tag for Tx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STGE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STGE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Tx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Rx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STGE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STGE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Rx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
	    STGE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
		goto fail;
	}

	/* create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
		goto fail;
	}

	/* allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sc_cdata.stge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;

	/* allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sc_cdata.stge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;

	/* create DMA maps for Tx buffers. */
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
stge_dma_free(struct stge_softc *sc)
{
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->sc_cdata.stge_tx_ring_tag) {
		if (sc->sc_cdata.stge_tx_ring_map)
			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_cdata.stge_tx_ring_map);
		if (sc->sc_cdata.stge_tx_ring_map &&
		    sc->sc_rdata.stge_tx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_rdata.stge_tx_ring,
			    sc->sc_cdata.stge_tx_ring_map);
		sc->sc_rdata.stge_tx_ring = NULL;
		sc->sc_cdata.stge_tx_ring_map = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
		sc->sc_cdata.stge_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc->sc_cdata.stge_rx_ring_tag) {
		if (sc->sc_cdata.stge_rx_ring_map)
			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_cdata.stge_rx_ring_map);
		if (sc->sc_cdata.stge_rx_ring_map &&
		    sc->sc_rdata.stge_rx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_rdata.stge_rx_ring,
			    sc->sc_cdata.stge_rx_ring_map);
		sc->sc_rdata.stge_rx_ring = NULL;
		sc->sc_cdata.stge_rx_ring_map = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
		sc->sc_cdata.stge_rx_ring_tag = NULL;
	}
	/* Tx buffers */
	if (sc->sc_cdata.stge_tx_tag) {
		for (i = 0; i < STGE_TX_RING_CNT; i++) {
			txd = &sc->sc_cdata.stge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = 0;
			}
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
		sc->sc_cdata.stge_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->sc_cdata.stge_rx_tag) {
		for (i = 0; i < STGE_RX_RING_CNT; i++) {
			rxd = &sc->sc_cdata.stge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = 0;
			}
		}
		if (sc->sc_cdata.stge_rx_sparemap) {
			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
			    sc->sc_cdata.stge_rx_sparemap);
			sc->sc_cdata.stge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
		sc->sc_cdata.stge_rx_tag = NULL;
	}

	if (sc->sc_cdata.stge_parent_tag) {
		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
		sc->sc_cdata.stge_parent_tag = NULL;
	}
}

/*
 * stge_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static int
stge_shutdown(device_t dev)
{

	return (stge_suspend(dev));
}

static void
stge_setwol(struct stge_softc *sc)
{
	struct ifnet *ifp;
	uint8_t v;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	v = CSR_READ_1(sc, STGE_WakeEvent);
	/* Disable all WOL bits. */
	v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
	    WE_WakeOnLanEnable);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		v |= WE_MagicPktEnable | WE_WakeOnLanEnable;
	CSR_WRITE_1(sc, STGE_WakeEvent, v);
	/* Reset Tx and prevent transmission. */
	CSR_WRITE_4(sc, STGE_AsicCtrl,
	    CSR_READ_4(sc, STGE_AsicCtrl) | AC_TxReset);
	/*
	 * The TC9021 automatically resets the link speed to 100Mbps when
	 * it's put to sleep, so there is no need to reset it here.
	 */
}

static int
stge_suspend(device_t dev)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	STGE_LOCK(sc);
	stge_stop(sc);
	sc->sc_suspended = 1;
	stge_setwol(sc);
	STGE_UNLOCK(sc);

	return (0);
}

static int
stge_resume(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	uint8_t v;

	sc = device_get_softc(dev);

	STGE_LOCK(sc);
	/*
	 * Clear the WOL bits so special frames no longer interfere
	 * with normal Rx operation.
	 */
	v = CSR_READ_1(sc, STGE_WakeEvent);
	v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
	    WE_WakeOnLanEnable);
	CSR_WRITE_1(sc, STGE_WakeEvent, v);
	ifp = sc->sc_ifp;
	if (ifp->if_flags & IFF_UP)
		stge_init_locked(sc);

	sc->sc_suspended = 0;
	STGE_UNLOCK(sc);

	return (0);
}

static void
stge_dma_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(2);
		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
			break;
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "DMA wait timed out\n");
}

static int
stge_encap(struct stge_softc *sc, struct mbuf **m_head)
{
	struct stge_txdesc *txd;
	struct stge_tfd *tfd;
	struct mbuf *m;
	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
	int error, i, nsegs, si;
	uint64_t csum_flags, tfc;

	STGE_LOCK_ASSERT(sc);

	if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
		return (ENOBUFS);

	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, STGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	m = *m_head;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= TFD_IPChecksumEnable;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= TFD_TCPChecksumEnable;
		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= TFD_UDPChecksumEnable;
	}

1260
1261 si = sc->sc_cdata.stge_tx_prod;
1262 tfd = &sc->sc_rdata.stge_tx_ring[si];
1263 for (i = 0; i < nsegs; i++)
1264 tfd->tfd_frags[i].frag_word0 =
1265 htole64(FRAG_ADDR(txsegs[i].ds_addr) |
1266 FRAG_LEN(txsegs[i].ds_len));
1267 sc->sc_cdata.stge_tx_cnt++;
1268
1269 tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
1270 TFD_FragCount(nsegs) | csum_flags;
1271 if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
1272 tfc |= TFD_TxDMAIndicate;
1273
1274 /* Update producer index. */
1275 sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;
1276
1277 /* Check if we have a VLAN tag to insert. */
1278 if (m->m_flags & M_VLANTAG)
1279 tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag));
1280 tfd->tfd_control = htole64(tfc);
1281
1282 /* Update Tx Queue. */
1283 STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
1284 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
1285 txd->tx_m = m;
1286
1287 /* Sync descriptors. */
1288 bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1289 BUS_DMASYNC_PREWRITE);
1290 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1291 sc->sc_cdata.stge_tx_ring_map,
1292 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1293
1294 return (0);
1295}

/*
 * stge_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
stge_start(struct ifnet *ifp)
{
	struct stge_softc *sc;

	sc = ifp->if_softc;
	STGE_LOCK(sc);
	stge_start_locked(ifp);
	STGE_UNLOCK(sc);
}

static void
stge_start_locked(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	STGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->sc_link == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (stge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->sc_watchdog_timer = 5;
	}
}

/*
 * stge_watchdog:
 *
 *	Watchdog timer handler.
 */
static void
stge_watchdog(struct stge_softc *sc)
{
	struct ifnet *ifp;

	STGE_LOCK_ASSERT(sc);

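	/* Return if the timer is disarmed (0) or has not expired yet. */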
	if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
		return;

	ifp = sc->sc_ifp;
	if_printf(sc->sc_ifp, "device timeout\n");
	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	stge_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		stge_start_locked(ifp);
}

/*
 * stge_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct stge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			STGE_LOCK(sc);
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				stge_init_locked(sc);
			}
			STGE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		STGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->sc_if_flags)
				    & IFF_PROMISC) != 0)
					stge_set_filter(sc);
			} else {
				if (sc->sc_detach == 0)
					stge_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				stge_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		STGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		STGE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			stge_set_multi(sc);
		STGE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(stge_poll, ifp);
				if (error != 0)
					break;
				STGE_LOCK(sc);
				CSR_WRITE_2(sc, STGE_IntEnable, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				STGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				if (error != 0)
					break;
				STGE_LOCK(sc);
				CSR_WRITE_2(sc, STGE_IntEnable,
				    sc->sc_IntEnable);
				ifp->if_capenable &= ~IFCAP_POLLING;
				STGE_UNLOCK(sc);
			}
		}
#endif
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist = STGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_WOL) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				STGE_LOCK(sc);
				stge_vlan_setup(sc);
				STGE_UNLOCK(sc);
			}
		}
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
stge_link_task(void *arg, int pending)
{
	struct stge_softc *sc;
	struct mii_data *mii;
	uint32_t v, ac;
	int i;

	sc = (struct stge_softc *)arg;
	STGE_LOCK(sc);

	mii = device_get_softc(sc->sc_miibus);
	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->sc_link = 1;
	} else
		sc->sc_link = 0;

	sc->sc_MACCtrl = 0;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
		sc->sc_MACCtrl |= MC_DuplexSelect;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_RXPAUSE) != 0)
		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_TXPAUSE) != 0)
		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
	/*
	 * Update STGE_MACCtrl register depending on link status.
	 * (duplex, flow control etc)
	 */
	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
	v |= sc->sc_MACCtrl;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
		/* Duplex setting changed, reset Tx/Rx functions. */
		ac = CSR_READ_4(sc, STGE_AsicCtrl);
		ac |= AC_TxReset | AC_RxReset;
		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
		for (i = 0; i < STGE_TIMEOUT; i++) {
			DELAY(100);
			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
				break;
		}
		if (i == STGE_TIMEOUT)
			device_printf(sc->sc_dev, "reset failed to complete\n");
	}
	STGE_UNLOCK(sc);
}

static __inline int
stge_tx_error(struct stge_softc *sc)
{
	uint32_t txstat;
	int error;

	for (error = 0;;) {
		txstat = CSR_READ_4(sc, STGE_TxStatus);
		if ((txstat & TS_TxComplete) == 0)
			break;
		/* Tx underrun */
		if ((txstat & TS_TxUnderrun) != 0) {
			/*
			 * XXX
			 * There should be a better way to recover
			 * from a Tx underrun than a full reset.
			 */
			if (sc->sc_nerr++ < STGE_MAXERR)
				device_printf(sc->sc_dev, "Tx underrun, "
				    "resetting...\n");
			if (sc->sc_nerr == STGE_MAXERR)
				device_printf(sc->sc_dev, "too many errors; "
				    "not reporting any more\n");
			error = -1;
			break;
		}
		/* Maximum/Late collisions, Re-enable Tx MAC. */
		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
			CSR_WRITE_4(sc, STGE_MACCtrl,
			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
			    MC_TxEnable);
	}

	return (error);
}

/*
 * stge_intr:
 *
 *	Interrupt service routine.
 */
static void
stge_intr(void *arg)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	int reinit;
	uint16_t status;

	sc = (struct stge_softc *)arg;
	ifp = sc->sc_ifp;

	STGE_LOCK(sc);

#ifdef DEVICE_POLLING
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		goto done_locked;
#endif
	status = CSR_READ_2(sc, STGE_IntStatus);
	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
		goto done_locked;

	/* Disable interrupts. */
	for (reinit = 0;;) {
		status = CSR_READ_2(sc, STGE_IntStatusAck);
		status &= sc->sc_IntEnable;
		if (status == 0)
			break;
		/* Host interface errors. */
		if ((status & IS_HostError) != 0) {
			device_printf(sc->sc_dev,
			    "Host interface error, resetting...\n");
			reinit = 1;
			goto force_init;
		}

		/* Receive interrupts. */
		if ((status & IS_RxDMAComplete) != 0) {
			stge_rxeof(sc);
			if ((status & IS_RFDListEnd) != 0)
				CSR_WRITE_4(sc, STGE_DMACtrl,
				    DMAC_RxDMAPollNow);
		}

		/* Transmit interrupts. */
		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
			stge_txeof(sc);

		/* Transmission errors. */
		if ((status & IS_TxComplete) != 0) {
			if ((reinit = stge_tx_error(sc)) != 0)
				break;
		}
	}

force_init:
	if (reinit != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		stge_init_locked(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		stge_start_locked(ifp);

done_locked:
	STGE_UNLOCK(sc);
}

/*
 * stge_txeof:
 *
 *	Helper; handle transmit interrupts.
 */
static void
stge_txeof(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct stge_txdesc *txd;
	uint64_t control;
	int cons;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	if (txd == NULL)
		return;
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (cons = sc->sc_cdata.stge_tx_cons;;
	    cons = (cons + 1) % STGE_TX_RING_CNT) {
		if (sc->sc_cdata.stge_tx_cnt <= 0)
			break;
		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;
		sc->sc_cdata.stge_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);

		/* Output counter is updated with statistics register */
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	}
	sc->sc_cdata.stge_tx_cons = cons;
	if (sc->sc_cdata.stge_tx_cnt == 0)
		sc->sc_watchdog_timer = 0;

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static __inline void
stge_discard_rxbuf(struct stge_softc *sc, int idx)
{
	struct stge_rfd *rfd;

	rfd = &sc->sc_rdata.stge_rx_ring[idx];
	rfd->rfd_status = 0;
}

#ifndef __NO_STRICT_ALIGNMENT
/*
 * It seems that TC9021's DMA engine has alignment restrictions in
 * DMA scatter operations.  The first DMA segment has no address
 * alignment restrictions, but the rest should be aligned on a 4(?)-byte
 * boundary.  Otherwise it would corrupt random memory.  Since we don't
 * know which one is used for the first segment in advance we simply
 * don't align at all.
 * To avoid copying over an entire frame to align, we allocate a new
 * mbuf and copy the ethernet header to the new mbuf.  The new mbuf is
 * prepended to the existing mbuf chain.
 */
static __inline struct mbuf *
stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
{
	struct mbuf *n;

	n = NULL;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
		} else
			m_freem(m);
	}

	return (n);
}
#endif

/*
 * stge_rxeof:
 *
 *	Helper; handle receive interrupts.
 */
static int
stge_rxeof(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct stge_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint64_t status64;
	uint32_t status;
	int cons, prog, rx_npkts;

	STGE_LOCK_ASSERT(sc);

	rx_npkts = 0;
	ifp = sc->sc_ifp;

	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);

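	/*
	 * A received frame may span several RFDs; mbufs are chained
	 * below until the descriptor carrying RFD_FrameEnd completes
	 * the packet.
	 */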
	prog = 0;
	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
		status = RFD_RxStatus(status64);
		if ((status & RFD_RFDDone) == 0)
			break;
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->sc_cdata.stge_rxcycles <= 0)
				break;
			sc->sc_cdata.stge_rxcycles--;
		}
#endif
		prog++;
		rxd = &sc->sc_cdata.stge_rxdesc[cons];
		mp = rxd->rx_m;

		/*
		 * If the packet had an error, drop it.  Note we count
		 * the error later in the periodic stats update.
		 */
		if ((status & RFD_FrameEnd) != 0 && (status &
		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) != 0) {
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}
		/*
		 * Add a new receive buffer to the ring.
		 */
		if (stge_newbuf(sc, cons) != 0) {
			ifp->if_iqdrops++;
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}

		if ((status & RFD_FrameEnd) != 0)
			mp->m_len = RFD_RxDMAFrameLen(status) -
			    sc->sc_cdata.stge_rxlen;
		sc->sc_cdata.stge_rxlen += mp->m_len;

		/* Chain mbufs. */
		if (sc->sc_cdata.stge_rxhead == NULL) {
			sc->sc_cdata.stge_rxhead = mp;
			sc->sc_cdata.stge_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->sc_cdata.stge_rxtail->m_next = mp;
			sc->sc_cdata.stge_rxtail = mp;
		}

		if ((status & RFD_FrameEnd) != 0) {
			m = sc->sc_cdata.stge_rxhead;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;

			if (m->m_pkthdr.len > sc->sc_if_framesize) {
				m_freem(m);
				STGE_RXCHAIN_RESET(sc);
				continue;
			}
			/*
			 * Set the incoming checksum information for
			 * the packet.
			 */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
				if ((status & RFD_IPDetected) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;
					if ((status & RFD_IPError) == 0)
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
				}
				if (((status & RFD_TCPDetected) != 0 &&
				    (status & RFD_TCPError) == 0) ||
				    ((status & RFD_UDPDetected) != 0 &&
				    (status & RFD_UDPError) == 0)) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID |
					    CSUM_PSEUDO_HDR);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

#ifndef __NO_STRICT_ALIGNMENT
			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
				if ((m = stge_fixup_rx(sc, m)) == NULL) {
					STGE_RXCHAIN_RESET(sc);
					continue;
				}
			}
#endif
			/* Check for VLAN tagged packets. */
			if ((status & RFD_VLANDetected) != 0 &&
			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
				m->m_pkthdr.ether_vtag = RFD_TCI(status64);
				m->m_flags |= M_VLANTAG;
			}

			STGE_UNLOCK(sc);
			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			STGE_LOCK(sc);
			rx_npkts++;

			STGE_RXCHAIN_RESET(sc);
		}
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->sc_cdata.stge_rx_cons = cons;
		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_cdata.stge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}

#ifdef DEVICE_POLLING
static int
stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct stge_softc *sc;
	uint16_t status;
	int rx_npkts;

	rx_npkts = 0;
	sc = ifp->if_softc;
	STGE_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		STGE_UNLOCK(sc);
		return (rx_npkts);
	}

	sc->sc_cdata.stge_rxcycles = count;
	rx_npkts = stge_rxeof(sc);
	stge_txeof(sc);

	if (cmd == POLL_AND_CHECK_STATUS) {
		status = CSR_READ_2(sc, STGE_IntStatus);
		status &= sc->sc_IntEnable;
		if (status != 0) {
			if ((status & IS_HostError) != 0) {
				device_printf(sc->sc_dev,
				    "Host interface error, resetting...\n");
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				stge_init_locked(sc);
			}
			if ((status & IS_TxComplete) != 0) {
				if (stge_tx_error(sc) != 0) {
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
					stge_init_locked(sc);
				}
			}
		}
	}

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		stge_start_locked(ifp);

	STGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif	/* DEVICE_POLLING */

/*
 * stge_tick:
 *
 *	One second timer, used to tick the MII.
 */
static void
stge_tick(void *arg)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = (struct stge_softc *)arg;

	STGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->sc_miibus);
	mii_tick(mii);

	/* Update statistics counters. */
	stge_stats_update(sc);

	/*
	 * Reclaim any pending Tx descriptors to release mbufs in a
	 * timely manner as we don't generate Tx completion interrupts
	 * for every frame.  This limits the delay to a maximum of one
	 * second.
	 */
	if (sc->sc_cdata.stge_tx_cnt != 0)
		stge_txeof(sc);

	stge_watchdog(sc);

	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
}

/*
 * stge_stats_update:
 *
 *	Read the TC9021 statistics counters.
 */
static void
stge_stats_update(struct stge_softc *sc)
{
	struct ifnet *ifp;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

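	/*
	 * The statistics registers clear on read; the octet counters
	 * below are read only to clear them.
	 */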
	CSR_READ_4(sc, STGE_OctetRcvOk);

	ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);

	ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);

	CSR_READ_4(sc, STGE_OctetXmtdOk);

	ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);

	ifp->if_collisions +=
	    CSR_READ_4(sc, STGE_LateCollisions) +
	    CSR_READ_4(sc, STGE_MultiColFrames) +
	    CSR_READ_4(sc, STGE_SingleColFrames);

	ifp->if_oerrors +=
	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
	    CSR_READ_2(sc, STGE_FramesWEXDeferal);
}
2039
2040/*
2041 * stge_reset:
2042 *
2043 * Perform a soft reset on the TC9021.
2044 */
2045static void
2046stge_reset(struct stge_softc *sc, uint32_t how)
2047{
2048 uint32_t ac;
2049 uint8_t v;
2050 int i, dv;
2051
2052 STGE_LOCK_ASSERT(sc);
2053
2054 dv = 5000;
2055 ac = CSR_READ_4(sc, STGE_AsicCtrl);
2056 switch (how) {
2057 case STGE_RESET_TX:
2058 ac |= AC_TxReset | AC_FIFO;
2059 dv = 100;
2060 break;
2061 case STGE_RESET_RX:
2062 ac |= AC_RxReset | AC_FIFO;
2063 dv = 100;
2064 break;
2065 case STGE_RESET_FULL:
2066 default:
2067 /*
2068 * Only assert RstOut if we're fiber. We need GMII clocks
2069 * to be present in order for the reset to complete on fiber
2070 * cards.
2071 */
2072 ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
2073 AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
2074 (sc->sc_usefiber ? AC_RstOut : 0);
2075 break;
2076 }
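	/*
	 * dv doubles as the initial settle delay and the poll interval
	 * below: 5 ms for a full reset, 100 us for the lighter Tx- or
	 * Rx-only resets.
	 */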
2077
2078 CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
2079
2080 	/* Account for reset problem at 10 Mbps. */
2081 DELAY(dv);
2082
2083 for (i = 0; i < STGE_TIMEOUT; i++) {
2084 if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
2085 break;
2086 DELAY(dv);
2087 }
2088
2089 if (i == STGE_TIMEOUT)
2090 device_printf(sc->sc_dev, "reset failed to complete\n");
2091
2092 /* Set LED, from Linux IPG driver. */
2093 ac = CSR_READ_4(sc, STGE_AsicCtrl);
2094 ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
2095 if ((sc->sc_led & 0x01) != 0)
2096 ac |= AC_LEDMode;
2097 if ((sc->sc_led & 0x03) != 0)
2098 ac |= AC_LEDModeBit1;
2099 if ((sc->sc_led & 0x08) != 0)
2100 ac |= AC_LEDSpeed;
2101 CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
2102
2103 	/* Set PHY, from Linux IPG driver. */
2104 v = CSR_READ_1(sc, STGE_PhySet);
2105 v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
2106 v |= ((sc->sc_led & 0x70) >> 4);
2107 CSR_WRITE_1(sc, STGE_PhySet, v);
2108}
2109
2110/*
2111 * stge_init: [ ifnet interface function ]
2112 *
2113 * Initialize the interface.
2114 */
2115static void
2116stge_init(void *xsc)
2117{
2118 struct stge_softc *sc;
2119
2120 sc = (struct stge_softc *)xsc;
2121 STGE_LOCK(sc);
2122 stge_init_locked(sc);
2123 STGE_UNLOCK(sc);
2124}
2125
2126static void
2127stge_init_locked(struct stge_softc *sc)
2128{
2129 struct ifnet *ifp;
2130 struct mii_data *mii;
2131 uint16_t eaddr[3];
2132 uint32_t v;
2133 int error;
2134
2135 STGE_LOCK_ASSERT(sc);
2136
2137 ifp = sc->sc_ifp;
2138 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2139 return;
2140 mii = device_get_softc(sc->sc_miibus);
2141
2142 /*
2143 * Cancel any pending I/O.
2144 */
2145 stge_stop(sc);
2146
2147 /*
2148 * Reset the chip to a known state.
2149 */
2150 stge_reset(sc, STGE_RESET_FULL);
2151
2152 /* Init descriptors. */
2153 error = stge_init_rx_ring(sc);
2154 if (error != 0) {
2155 device_printf(sc->sc_dev,
2156 "initialization failed: no memory for rx buffers\n");
2157 stge_stop(sc);
2158 goto out;
2159 }
2160 stge_init_tx_ring(sc);
2161
2162 /* Set the station address. */
2163 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2164 CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
2165 CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
2166 CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));
2167
2168 /*
2169 * Set the statistics masks. Disable all the RMON stats,
2170 * and disable selected stats in the non-RMON stats registers.
2171 */
2172 CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
2173 CSR_WRITE_4(sc, STGE_StatisticsMask,
2174 (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
2175 (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
2176 (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
2177 (1U << 21));
2178
2179 /* Set up the receive filter. */
2180 stge_set_filter(sc);
2181 /* Program multicast filter. */
2182 stge_set_multi(sc);
2183
2184 /*
2185 * Give the transmit and receive ring to the chip.
2186 */
2187 CSR_WRITE_4(sc, STGE_TFDListPtrHi,
2188 STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
2189 CSR_WRITE_4(sc, STGE_TFDListPtrLo,
2190 STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));
2191
2192 CSR_WRITE_4(sc, STGE_RFDListPtrHi,
2193 STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
2194 CSR_WRITE_4(sc, STGE_RFDListPtrLo,
2195 STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));
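	/*
	 * STGE_ADDR_HI()/STGE_ADDR_LO() split each ring's 64-bit bus
	 * address across the paired 32-bit list-pointer registers.
	 */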
2196
2197 /*
2198 * Initialize the Tx auto-poll period. It's OK to make this number
2199 * large (255 is the max, but we use 127) -- we explicitly kick the
2200 * transmit engine when there's actually a packet.
2201 */
2202 CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2203
2204 /* ..and the Rx auto-poll period. */
2205 CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2206
2207 /* Initialize the Tx start threshold. */
2208 CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
2209
2210 /* Rx DMA thresholds, from Linux */
2211 CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
2212 CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);
2213
2214 	/* Rx early threshold, from Linux */
2215 CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);
2216
2217 /* Tx DMA thresholds, from Linux */
2218 CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
2219 CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);
2220
2221 /*
2222 * Initialize the Rx DMA interrupt control register. We
2223 * request an interrupt after every incoming packet, but
2224 	 * defer it for sc_rxint_dmawait us. When the number of
2225 	 * frames pending reaches sc_rxint_nframe, we stop
2226 	 * deferring the interrupt and signal it immediately.
2227 */
2228 CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
2229 RDIC_RxFrameCount(sc->sc_rxint_nframe) |
2230 RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
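	/*
	 * Sketch of the resulting behavior (the defaults live in
	 * if_stgereg.h; the numbers here are illustrative): with
	 * sc_rxint_nframe = 8 and sc_rxint_dmawait = 30 us, an
	 * interrupt fires 30 us after the first unacknowledged frame,
	 * or immediately once 8 frames are pending, whichever is first.
	 */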
2231
2232 /*
2233 * Initialize the interrupt mask.
2234 */
2235 sc->sc_IntEnable = IS_HostError | IS_TxComplete |
2236 IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
2237#ifdef DEVICE_POLLING
2238 /* Disable interrupts if we are polling. */
2239 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2240 CSR_WRITE_2(sc, STGE_IntEnable, 0);
2241 else
2242#endif
2243 CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
2244
2245 /*
2246 * Configure the DMA engine.
2247 * XXX Should auto-tune TxBurstLimit.
2248 */
2249 CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));
2250
2251 /*
2252 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
2253 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
2254 * in the Rx FIFO.
2255 */
2256 CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
2257 CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
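	/*
	 * Both flow-control thresholds are programmed in 16-byte units,
	 * hence the division: 29696 / 16 == 1856 and 3056 / 16 == 191.
	 */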
2258
2259 /*
2260 * Set the maximum frame size.
2261 */
2262 sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2263 CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);
2264
2265 /*
2266 * Initialize MacCtrl -- do it before setting the media,
2267 * as setting the media will actually program the register.
2268 *
2269 * Note: We have to poke the IFS value before poking
2270 * anything else.
2271 */
2272 	/* Tx/Rx MAC should be disabled before programming IFS. */
2273 CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));
2274
2275 stge_vlan_setup(sc);
2276
2277 if (sc->sc_rev >= 6) { /* >= B.2 */
2278 /* Multi-frag frame bug work-around. */
2279 CSR_WRITE_2(sc, STGE_DebugCtrl,
2280 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);
2281
2282 /* Tx Poll Now bug work-around. */
2283 CSR_WRITE_2(sc, STGE_DebugCtrl,
2284 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
2285 /* Tx Poll Now bug work-around. */
2286 CSR_WRITE_2(sc, STGE_DebugCtrl,
2287 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
2288 }
2289
2290 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2291 v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
2292 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2293 /*
2294 	 * It seems that transmitting frames without checking the state of
2295 	 * the Rx/Tx MAC wedges the hardware.
2296 */
2297 stge_start_tx(sc);
2298 stge_start_rx(sc);
2299
2300 sc->sc_link = 0;
2301 /*
2302 * Set the current media.
2303 */
2304 mii_mediachg(mii);
2305
2306 /*
2307 * Start the one second MII clock.
2308 */
2309 callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
2310
2311 /*
2312 * ...all done!
2313 */
2314 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2315 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2316
2317 out:
2318 if (error != 0)
2319 device_printf(sc->sc_dev, "interface not running\n");
2320}
2321
2322static void
2323stge_vlan_setup(struct stge_softc *sc)
2324{
2325 struct ifnet *ifp;
2326 uint32_t v;
2327
2328 ifp = sc->sc_ifp;
2329 /*
2330 	 * The NIC always copies a VLAN tag regardless of the STGE_MACCtrl
2331 	 * MC_AutoVLANuntagging bit.
2332 	 * The MC_AutoVLANtagging bit selects which VLAN source to use
2333 	 * between STGE_VLANTag and the TFC. However, the TFC
2334 	 * TFD_VLANTagInsert bit has priority over MC_AutoVLANtagging,
2335 	 * so we always use the TFC instead of the STGE_VLANTag register.
2336 */
2337 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2338 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2339 v |= MC_AutoVLANuntagging;
2340 else
2341 v &= ~MC_AutoVLANuntagging;
2342 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2343}
2344
2345/*
2346 * Stop transmission on the interface.
2347 */
2348static void
2349stge_stop(struct stge_softc *sc)
2350{
2351 struct ifnet *ifp;
2352 struct stge_txdesc *txd;
2353 struct stge_rxdesc *rxd;
2354 uint32_t v;
2355 int i;
2356
2357 STGE_LOCK_ASSERT(sc);
2358 /*
2359 * Stop the one second clock.
2360 */
2361 callout_stop(&sc->sc_tick_ch);
2362 sc->sc_watchdog_timer = 0;
2363
2364 /*
2365 * Disable interrupts.
2366 */
2367 CSR_WRITE_2(sc, STGE_IntEnable, 0);
2368
2369 /*
2370 * Stop receiver, transmitter, and stats update.
2371 */
2372 stge_stop_rx(sc);
2373 stge_stop_tx(sc);
2374 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2375 v |= MC_StatisticsDisable;
2376 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2377
2378 /*
2379 * Stop the transmit and receive DMA.
2380 */
2381 stge_dma_wait(sc);
2382 CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
2383 CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
2384 CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
2385 CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
2386
2387 /*
2388 * Free RX and TX mbufs still in the queues.
2389 */
2390 for (i = 0; i < STGE_RX_RING_CNT; i++) {
2391 rxd = &sc->sc_cdata.stge_rxdesc[i];
2392 if (rxd->rx_m != NULL) {
2393 bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
2394 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2395 bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
2396 rxd->rx_dmamap);
2397 m_freem(rxd->rx_m);
2398 rxd->rx_m = NULL;
2399 }
2400 }
2401 for (i = 0; i < STGE_TX_RING_CNT; i++) {
2402 txd = &sc->sc_cdata.stge_txdesc[i];
2403 if (txd->tx_m != NULL) {
2404 bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
2405 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2406 bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
2407 txd->tx_dmamap);
2408 m_freem(txd->tx_m);
2409 txd->tx_m = NULL;
2410 }
2411 }
2412
2413 /*
2414 * Mark the interface down and cancel the watchdog timer.
2415 */
2416 ifp = sc->sc_ifp;
2417 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2418 sc->sc_link = 0;
2419}
2420
2421static void
2422stge_start_tx(struct stge_softc *sc)
2423{
2424 uint32_t v;
2425 int i;
2426
2427 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2428 if ((v & MC_TxEnabled) != 0)
2429 return;
2430 v |= MC_TxEnable;
2431 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2432 CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
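	/*
	 * Note: the Tx auto-poll period is re-armed here, presumably
	 * because it is quiesced while the MAC is disabled (an
	 * assumption, not verified against the manual); the loop below
	 * then waits up to STGE_TIMEOUT * 10 us for MC_TxEnabled to
	 * latch.
	 */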
2433 for (i = STGE_TIMEOUT; i > 0; i--) {
2434 DELAY(10);
2435 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2436 if ((v & MC_TxEnabled) != 0)
2437 break;
2438 }
2439 if (i == 0)
2440 device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
2441}
2442
2443static void
2444stge_start_rx(struct stge_softc *sc)
2445{
2446 uint32_t v;
2447 int i;
2448
2449 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2450 if ((v & MC_RxEnabled) != 0)
2451 return;
2452 v |= MC_RxEnable;
2453 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2454 CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2455 for (i = STGE_TIMEOUT; i > 0; i--) {
2456 DELAY(10);
2457 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2458 if ((v & MC_RxEnabled) != 0)
2459 break;
2460 }
2461 if (i == 0)
2462 device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
2463}
2464
2465static void
2466stge_stop_tx(struct stge_softc *sc)
2467{
2468 uint32_t v;
2469 int i;
2470
2471 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2472 if ((v & MC_TxEnabled) == 0)
2473 return;
2474 v |= MC_TxDisable;
2475 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2476 for (i = STGE_TIMEOUT; i > 0; i--) {
2477 DELAY(10);
2478 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2479 if ((v & MC_TxEnabled) == 0)
2480 break;
2481 }
2482 if (i == 0)
2483 device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
2484}
2485
2486static void
2487stge_stop_rx(struct stge_softc *sc)
2488{
2489 uint32_t v;
2490 int i;
2491
2492 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2493 if ((v & MC_RxEnabled) == 0)
2494 return;
2495 v |= MC_RxDisable;
2496 CSR_WRITE_4(sc, STGE_MACCtrl, v);
2497 for (i = STGE_TIMEOUT; i > 0; i--) {
2498 DELAY(10);
2499 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2500 if ((v & MC_RxEnabled) == 0)
2501 break;
2502 }
2503 if (i == 0)
2504 device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
2505}
2506
2507static void
2508stge_init_tx_ring(struct stge_softc *sc)
2509{
2510 struct stge_ring_data *rd;
2511 struct stge_txdesc *txd;
2512 bus_addr_t addr;
2513 int i;
2514
2515 STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
2516 STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
2517
2518 sc->sc_cdata.stge_tx_prod = 0;
2519 sc->sc_cdata.stge_tx_cons = 0;
2520 sc->sc_cdata.stge_tx_cnt = 0;
2521
2522 rd = &sc->sc_rdata;
2523 bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
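	/*
	 * Chain the TFDs into a ring: each descriptor's tfd_next holds
	 * the bus address of its successor and the last slot wraps to
	 * slot 0. TFD_TFDDone is pre-set so every slot initially looks
	 * "completed", i.e. free for the driver to fill.
	 */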
2524 for (i = 0; i < STGE_TX_RING_CNT; i++) {
2525 if (i == (STGE_TX_RING_CNT - 1))
2526 addr = STGE_TX_RING_ADDR(sc, 0);
2527 else
2528 addr = STGE_TX_RING_ADDR(sc, i + 1);
2529 rd->stge_tx_ring[i].tfd_next = htole64(addr);
2530 rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
2531 txd = &sc->sc_cdata.stge_txdesc[i];
2532 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
2533 }
2534
2535 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
2536 sc->sc_cdata.stge_tx_ring_map,
2537 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2538
2539}
2540
2541static int
2542stge_init_rx_ring(struct stge_softc *sc)
2543{
2544 struct stge_ring_data *rd;
2545 bus_addr_t addr;
2546 int i;
2547
2548 sc->sc_cdata.stge_rx_cons = 0;
2549 STGE_RXCHAIN_RESET(sc);
2550
2551 rd = &sc->sc_rdata;
2552 bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
2553 for (i = 0; i < STGE_RX_RING_CNT; i++) {
2554 if (stge_newbuf(sc, i) != 0)
2555 return (ENOBUFS);
2556 if (i == (STGE_RX_RING_CNT - 1))
2557 addr = STGE_RX_RING_ADDR(sc, 0);
2558 else
2559 addr = STGE_RX_RING_ADDR(sc, i + 1);
2560 rd->stge_rx_ring[i].rfd_next = htole64(addr);
2561 rd->stge_rx_ring[i].rfd_status = 0;
2562 }
2563
2564 bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
2565 sc->sc_cdata.stge_rx_ring_map,
2566 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2567
2568 return (0);
2569}
2570
2571/*
2572 * stge_newbuf:
2573 *
2574 * Add a receive buffer to the indicated descriptor.
2575 */
2576static int
2577stge_newbuf(struct stge_softc *sc, int idx)
2578{
2579 struct stge_rxdesc *rxd;
2580 struct stge_rfd *rfd;
2581 struct mbuf *m;
2582 bus_dma_segment_t segs[1];
2583 bus_dmamap_t map;
2584 int nsegs;
2585
2586 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2587 if (m == NULL)
2588 return (ENOBUFS);
2589 m->m_len = m->m_pkthdr.len = MCLBYTES;
2590 /*
2591 	 * The hardware requires a 4-byte aligned DMA address when a
2592 	 * jumbo frame is used.
2593 */
2594 if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
2595 m_adj(m, ETHER_ALIGN);
2596
2597 if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
2598 sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2599 m_freem(m);
2600 return (ENOBUFS);
2601 }
2602 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2603
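	/*
	 * Swap the freshly loaded spare map into the descriptor and
	 * keep the old map as the new spare. Because the load above
	 * targeted the spare, a load failure leaves the ring's current
	 * mbuf and mapping untouched.
	 */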
2604 rxd = &sc->sc_cdata.stge_rxdesc[idx];
2605 if (rxd->rx_m != NULL) {
2606 bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2607 BUS_DMASYNC_POSTREAD);
2608 bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
2609 }
2610 map = rxd->rx_dmamap;
2611 rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
2612 sc->sc_cdata.stge_rx_sparemap = map;
2613 bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2614 BUS_DMASYNC_PREREAD);
2615 rxd->rx_m = m;
2616
2617 rfd = &sc->sc_rdata.stge_rx_ring[idx];
2618 rfd->rfd_frag.frag_word0 =
2619 htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
2620 rfd->rfd_status = 0;
2621
2622 return (0);
2623}
2624
2625/*
2626 * stge_set_filter:
2627 *
2628 * Set up the receive filter.
2629 */
2630static void
2631stge_set_filter(struct stge_softc *sc)
2632{
2633 struct ifnet *ifp;
2634 uint16_t mode;
2635
2636 STGE_LOCK_ASSERT(sc);
2637
2638 ifp = sc->sc_ifp;
2639
2640 mode = CSR_READ_2(sc, STGE_ReceiveMode);
2641 mode |= RM_ReceiveUnicast;
2642 if ((ifp->if_flags & IFF_BROADCAST) != 0)
2643 mode |= RM_ReceiveBroadcast;
2644 else
2645 mode &= ~RM_ReceiveBroadcast;
2646 if ((ifp->if_flags & IFF_PROMISC) != 0)
2647 mode |= RM_ReceiveAllFrames;
2648 else
2649 mode &= ~RM_ReceiveAllFrames;
2650
2651 CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2652}
2653
2654static void
2655stge_set_multi(struct stge_softc *sc)
2656{
2657 struct ifnet *ifp;
2658 struct ifmultiaddr *ifma;
2659 uint32_t crc;
2660 uint32_t mchash[2];
2661 uint16_t mode;
2662 int count;
2663
2664 STGE_LOCK_ASSERT(sc);
2665
2666 ifp = sc->sc_ifp;
2667
2668 mode = CSR_READ_2(sc, STGE_ReceiveMode);
2669 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2670 if ((ifp->if_flags & IFF_PROMISC) != 0)
2671 mode |= RM_ReceiveAllFrames;
2672 else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2673 mode |= RM_ReceiveMulticast;
2674 CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2675 return;
2676 }
2677
2678 	/* Clear existing filters. */
2679 CSR_WRITE_4(sc, STGE_HashTable0, 0);
2680 CSR_WRITE_4(sc, STGE_HashTable1, 0);
2681
2682 /*
2683 * Set up the multicast address filter by passing all multicast
2684 * addresses through a CRC generator, and then using the low-order
2685 	 * 6 bits as an index into the 64-bit multicast hash table. The
2686 * high order bits select the register, while the rest of the bits
2687 * select the bit within the register.
2688 */
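	/*
	 * For example, a CRC whose low 6 bits are 0x27 selects
	 * HashTable1 (0x27 >> 5 == 1), bit 7 (0x27 & 0x1f).
	 */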
2689
2690 bzero(mchash, sizeof(mchash));
2691
2692 count = 0;
2693 if_maddr_rlock(sc->sc_ifp);
2694 TAILQ_FOREACH(ifma, &sc->sc_ifp->if_multiaddrs, ifma_link) {
2695 if (ifma->ifma_addr->sa_family != AF_LINK)
2696 continue;
2697 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2698 ifma->ifma_addr), ETHER_ADDR_LEN);
2699
2700 /* Just want the 6 least significant bits. */
2701 crc &= 0x3f;
2702
2703 /* Set the corresponding bit in the hash table. */
2704 mchash[crc >> 5] |= 1 << (crc & 0x1f);
2705 count++;
2706 }
2707 if_maddr_runlock(ifp);
2708
2709 mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
2710 if (count > 0)
2711 mode |= RM_ReceiveMulticastHash;
2712 else
2713 mode &= ~RM_ReceiveMulticastHash;
2714
2715 CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
2716 CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
2717 CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2718}
2719
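/*
 * Clamp-checked integer sysctl handler: the new value is accepted only
 * when it lies within [low, high]. The two wrappers below apply it to
 * the Rx interrupt moderation tunables.
 */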
2720static int
2721sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2722{
2723 int error, value;
2724
2725 if (!arg1)
2726 return (EINVAL);
2727 value = *(int *)arg1;
2728 error = sysctl_handle_int(oidp, &value, 0, req);
2729 if (error || !req->newptr)
2730 return (error);
2731 if (value < low || value > high)
2732 return (EINVAL);
2733 *(int *)arg1 = value;
2734
2735 return (0);
2736}
2737
2738static int
2739sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
2740{
2741 return (sysctl_int_range(oidp, arg1, arg2, req,
2742 STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
2743}
2744
2745static int
2746sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
2747{
2748 return (sysctl_int_range(oidp, arg1, arg2, req,
2749 STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
2750}