Deleted Added
sdiff udiff text old ( 163437 ) new ( 163503 )
full compact
1/* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */
2
3/*-
4 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any

--- 7 unchanged lines hidden (view full) ---

16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
22
23#include <sys/cdefs.h>
24__FBSDID("$FreeBSD: head/sys/dev/nfe/if_nfe.c 163437 2006-10-16 16:36:29Z obrien $");
25
26/* Uncomment the following line to enable polling. */
27/* #define DEVICE_POLLING */
28
29#define NFE_NO_JUMBO
30#define NFE_CSUM
31#define NFE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
32#define NVLAN 0
33
34#ifdef HAVE_KERNEL_OPTION_HEADERS
35#include "opt_device_polling.h"
36#endif
37
38#include <sys/param.h>
39#include <sys/endian.h>
40#include <sys/systm.h>

--- 29 unchanged lines hidden (view full) ---

70#include <dev/nfe/if_nfereg.h>
71#include <dev/nfe/if_nfevar.h>
72
73MODULE_DEPEND(nfe, pci, 1, 1, 1);
74MODULE_DEPEND(nfe, ether, 1, 1, 1);
75MODULE_DEPEND(nfe, miibus, 1, 1, 1);
76#include "miibus_if.h"
77
78static int nfe_probe (device_t);
79static int nfe_attach (device_t);
80static int nfe_detach (device_t);
81static void nfe_shutdown(device_t);
82static int nfe_miibus_readreg (device_t, int, int);
83static int nfe_miibus_writereg (device_t, int, int, int);
84static void nfe_miibus_statchg (device_t);
85static int nfe_ioctl(struct ifnet *, u_long, caddr_t);
86static void nfe_intr(void *);
87static void nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
88static void nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
89static void nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
90static void nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
91static void nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
92static void nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
93static void nfe_rxeof(struct nfe_softc *);
94static void nfe_txeof(struct nfe_softc *);

--- 24 unchanged lines hidden (view full) ---

119static void nfe_set_macaddr(struct nfe_softc *, u_char *);
120static void nfe_dma_map_segs (void *, bus_dma_segment_t *, int, int);
121#ifdef DEVICE_POLLING
122static void nfe_poll_locked(struct ifnet *, enum poll_cmd, int);
123#endif
124
#ifdef NFE_DEBUG
/* Runtime-tunable verbosity knob; 0 silences all DPRINTF output. */
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* Helpers for the per-softc mutex serializing access to the device. */
#define NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
#define NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
#define NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)

/* OpenBSD spelling of le16toh(); kept because this driver is an OpenBSD port. */
#define letoh16(x) le16toh(x)

/* PCI BAR rid of the memory-mapped register window (see nfe_attach()). */
#define NV_RID		0x10
141
/* newbus method dispatch table for nfe(4). */
static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }		/* table terminator */
};
160
/* newbus driver glue: device name prefix, method table, softc size. */
static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

/* Register nfe on the PCI bus, and miibus under nfe for PHY management. */
DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
171
172static struct nfe_type nfe_devs[] = {
173 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
174 "NVIDIA nForce MCP Networking Adapter"},
175 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
176 "NVIDIA nForce2 MCP2 Networking Adapter"},
177 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
178 "NVIDIA nForce2 400 MCP4 Networking Adapter"},
179 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
180 "NVIDIA nForce2 400 MCP5 Networking Adapter"},
181 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
182 "NVIDIA nForce3 MCP3 Networking Adapter"},
183 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
184 "NVIDIA nForce3 250 MCP6 Networking Adapter"},
185 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
186 "NVIDIA nForce3 MCP7 Networking Adapter"},
187 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
188 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
189 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
190 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
191 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
192 "NVIDIA nForce MCP04 Networking Adapter"}, // MCP10
193 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
194 "NVIDIA nForce MCP04 Networking Adapter"}, // MCP11
195 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
196 "NVIDIA nForce 430 MCP12 Networking Adapter"},
197 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
198 "NVIDIA nForce 430 MCP13 Networking Adapter"},
199 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
200 "NVIDIA nForce MCP55 Networking Adapter"},
201 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
202 "NVIDIA nForce MCP55 Networking Adapter"},
203 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
204 "NVIDIA nForce MCP61 Networking Adapter"},
205 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
206 "NVIDIA nForce MCP61 Networking Adapter"},
207 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
208 "NVIDIA nForce MCP61 Networking Adapter"},
209 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
210 "NVIDIA nForce MCP61 Networking Adapter"},
211 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
212 "NVIDIA nForce MCP65 Networking Adapter"},
213 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
214 "NVIDIA nForce MCP65 Networking Adapter"},
215 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
216 "NVIDIA nForce MCP65 Networking Adapter"},
217 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
218 "NVIDIA nForce MCP65 Networking Adapter"},
219 {0, 0, NULL}
220};
221
222
223/* Probe for supported hardware ID's */
224static int
225nfe_probe(device_t dev)
226{

--- 8 unchanged lines hidden (view full) ---

235 return (0);
236 }
237 t++;
238 }
239
240 return (ENXIO);
241}
242
243static int
244nfe_attach(device_t dev)
245{
246 struct nfe_softc *sc;
247 struct ifnet *ifp;
248 int unit, error = 0, rid;
249
250 sc = device_get_softc(dev);
251 unit = device_get_unit(dev);
252 sc->nfe_dev = dev;
253 sc->nfe_unit = unit;
254
255 mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
256 MTX_DEF | MTX_RECURSE);
257 callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
258
259
260 pci_enable_busmaster(dev);
261
262 rid = NV_RID;
263 sc->nfe_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
264 0, ~0, 1, RF_ACTIVE);
265
266 if (sc->nfe_res == NULL) {
267 printf ("nfe%d: couldn't map ports/memory\n", unit);
268 error = ENXIO;
269 goto fail;

--- 31 unchanged lines hidden (view full) ---

301 case PCI_PRODUCT_NVIDIA_CK804_LAN1:
302 case PCI_PRODUCT_NVIDIA_CK804_LAN2:
303 case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
304 case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
305 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
306 break;
307 case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
308 case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
309 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_HW_VLAN;
310 break;
311 case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
312 case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
313 case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
314 case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
315 sc->nfe_flags |= NFE_40BIT_ADDR;
316 break;
317 case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
318 case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
319 case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
320 case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
321 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
322 break;
323 }
324
325#ifndef NFE_NO_JUMBO
326 /* enable jumbo frames for adapters that support it */
327 if (sc->nfe_flags & NFE_JUMBO_SUP)
328 sc->nfe_flags |= NFE_USE_JUMBO;
329#endif
330
331 /*
332 * Allocate the parent bus DMA tag appropriate for PCI.
333 */
334#define NFE_NSEG_NEW 32
335 error = bus_dma_tag_create(NULL, /* parent */
336 1, 0, /* alignment, boundary */
337 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
338 BUS_SPACE_MAXADDR, /* highaddr */
339 NULL, NULL, /* filter, filterarg */
340 MAXBSIZE, NFE_NSEG_NEW, /* maxsize, nsegments */
341 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
342 BUS_DMA_ALLOCNOW, /* flags */
343 NULL, NULL, /* lockfunc, lockarg */
344 &sc->nfe_parent_tag);
345 if (error)
346 goto fail;
347
348 /*
349 * Allocate Tx and Rx rings.
350 */
351 if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
352 printf("nfe%d: could not allocate Tx ring\n", unit);

--- 39 unchanged lines hidden (view full) ---

392#endif
393 ifp->if_capenable = ifp->if_capabilities;
394
395#ifdef DEVICE_POLLING
396 ifp->if_capabilities |= IFCAP_POLLING;
397#endif
398
399 /* Do MII setup */
400 if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd, nfe_ifmedia_sts)) {
401 printf("nfe%d: MII without any phy!\n", unit);
402 error = ENXIO;
403 goto fail;
404 }
405
406 ether_ifattach(ifp, sc->eaddr);
407
408 error = bus_setup_intr(dev, sc->nfe_irq, INTR_TYPE_NET|INTR_MPSAFE,
409 nfe_intr, sc, &sc->nfe_intrhand);
410
411 if (error) {
412 printf("nfe%d: couldn't set up irq\n", unit);
413 ether_ifdetach(ifp);
414 goto fail;
415 }
416
417fail:
418 if (error)
419 nfe_detach(dev);
420
421 return (error);
422}
423
424
425static int
426nfe_detach(device_t dev)
427{
428 struct nfe_softc *sc;
429 struct ifnet *ifp;
430 u_char eaddr[ETHER_ADDR_LEN];
431 int i;
432
433 sc = device_get_softc(dev);
434 KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
435 ifp = sc->nfe_ifp;
436
437#ifdef DEVICE_POLLING
438 if (ifp->if_capenable & IFCAP_POLLING)
439 ether_poll_deregister(ifp);

--- 76 unchanged lines hidden (view full) ---

516
517 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
518
519 NFE_WRITE(sc, NFE_PHY_IFACE, phy);
520 NFE_WRITE(sc, NFE_MISC1, misc);
521 NFE_WRITE(sc, NFE_LINKSPEED, link);
522}
523
524static int
525nfe_miibus_readreg(device_t dev, int phy, int reg)
526{
527 struct nfe_softc *sc = device_get_softc(dev);
528 u_int32_t val;
529 int ntries;
530
531 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

--- 19 unchanged lines hidden (view full) ---

551 DPRINTFN(2, ("nfe%d: could not read PHY\n", sc->nfe_unit));
552 return 0;
553 }
554
555 val = NFE_READ(sc, NFE_PHY_DATA);
556 if (val != 0xffffffff && val != 0)
557 sc->mii_phyaddr = phy;
558
559 DPRINTFN(2, ("nfe%d: mii read phy %d reg 0x%x ret 0x%x\n", sc->nfe_unit, phy, reg, val));
560
561 return val;
562}
563
564static int
565nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
566{
567 struct nfe_softc *sc = device_get_softc(dev);
568 u_int32_t ctl;
569 int ntries;
570
571 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
572
573 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
574 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
575 DELAY(100);
576 }
577

--- 8 unchanged lines hidden (view full) ---

586 }
587#ifdef NFE_DEBUG
588 if (nfedebug >= 2 && ntries == 1000)
589 printf("could not write to PHY\n");
590#endif
591 return 0;
592}
593
594static int
595nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
596{
597 struct nfe_desc32 *desc32;
598 struct nfe_desc64 *desc64;
599 struct nfe_rx_data *data;
600 struct nfe_jbuf *jbuf;
601 void **desc;

--- 6 unchanged lines hidden (view full) ---

608 } else {
609 desc = (void **)&ring->desc32;
610 descsize = sizeof (struct nfe_desc32);
611 }
612
613 ring->cur = ring->next = 0;
614 ring->bufsz = MCLBYTES;
615
616 error = bus_dma_tag_create(sc->nfe_parent_tag,
617 PAGE_SIZE, 0, /* alignment, boundary */
618 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
619 BUS_SPACE_MAXADDR, /* highaddr */
620 NULL, NULL, /* filter, filterarg */
621 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
622 NFE_RX_RING_COUNT * descsize, /* maxsegsize */
623 BUS_DMA_ALLOCNOW, /* flags */
624 NULL, NULL, /* lockfunc, lockarg */
625 &ring->rx_desc_tag);
626 if (error != 0) {
627 printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit);
628 goto fail;
629 }
630
631 /* allocate memory to desc */
632 error = bus_dmamem_alloc(ring->rx_desc_tag, (void **)desc, BUS_DMA_NOWAIT, &ring->rx_desc_map);
633 if (error != 0) {
634 printf("nfe%d: could not create desc DMA map\n", sc->nfe_unit);
635 goto fail;
636 }
637
638 /* map desc to device visible address space */
639 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, *desc,
640 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ring->rx_desc_segs, BUS_DMA_NOWAIT);
641 if (error != 0) {
642 printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit);
643 goto fail;
644 }
645
646 bzero(*desc, NFE_RX_RING_COUNT * descsize);
647 ring->rx_desc_addr = ring->rx_desc_segs.ds_addr;
648 ring->physaddr = ring->rx_desc_addr;
649
650 if (sc->nfe_flags & NFE_USE_JUMBO) {
651 ring->bufsz = NFE_JBYTES;
652 if ((error = nfe_jpool_alloc(sc)) != 0) {
653 printf("nfe%d: could not allocate jumbo frames\n", sc->nfe_unit);
654 goto fail;
655 }
656 }
657
658 /*
659 * Pre-allocate Rx buffers and populate Rx ring.
660 */
661 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
662 data = &sc->rxq.data[i];
663
664 MGETHDR(data->m, M_DONTWAIT, MT_DATA);
665 if (data->m == NULL) {
666 printf("nfe%d: could not allocate rx mbuf\n", sc->nfe_unit);
667 error = ENOMEM;
668 goto fail;
669 }
670
671 if (sc->nfe_flags & NFE_USE_JUMBO) {
672 if ((jbuf = nfe_jalloc(sc)) == NULL) {
673 printf("nfe%d: could not allocate jumbo buffer\n", sc->nfe_unit);
674 goto fail;
675 }
676 data->m->m_data = (void *)jbuf->buf;
677 data->m->m_len = data->m->m_pkthdr.len = NFE_JBYTES;
678 MEXTADD(data->m, jbuf->buf, NFE_JBYTES, nfe_jfree, (struct nfe_softc *)sc, 0, EXT_NET_DRV);
679 /* m_adj(data->m, ETHER_ALIGN); */
680 physaddr = jbuf->physaddr;
681 } else {
682 error = bus_dma_tag_create(sc->nfe_parent_tag,
683 ETHER_ALIGN, 0, /* alignment, boundary */
684 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
685 BUS_SPACE_MAXADDR, /* highaddr */
686 NULL, NULL, /* filter, filterarg */
687 MCLBYTES, 1, /* maxsize, nsegments */
688 MCLBYTES, /* maxsegsize */
689 BUS_DMA_ALLOCNOW, /* flags */
690 NULL, NULL, /* lockfunc, lockarg */
691 &data->rx_data_tag);
692 if (error != 0) {
693 printf("nfe%d: could not create DMA map\n", sc->nfe_unit);
694 goto fail;
695 }
696
697 error = bus_dmamap_create(data->rx_data_tag, 0, &data->rx_data_map);
698 if (error != 0) {
699 printf("nfe%d: could not allocate mbuf cluster\n", sc->nfe_unit);
700 goto fail;
701 }
702
703 MCLGET(data->m, M_DONTWAIT);
704 if (!(data->m->m_flags & M_EXT)) {
705 error = ENOMEM;
706 goto fail;
707 }
708
709 error = bus_dmamap_load(data->rx_data_tag, data->rx_data_map, mtod(data->m, void *),
710 MCLBYTES, nfe_dma_map_segs, &data->rx_data_segs, BUS_DMA_NOWAIT);
711 if (error != 0) {
712 printf("nfe%d: could not load rx buf DMA map\n", sc->nfe_unit);
713 goto fail;
714 }
715
716 data->rx_data_addr = data->rx_data_segs.ds_addr;
717 physaddr = data->rx_data_addr;
718
719 }
720
721 if (sc->nfe_flags & NFE_40BIT_ADDR) {
722 desc64 = &sc->rxq.desc64[i];
723#if defined(__LP64__)
724 desc64->physaddr[0] = htole32(physaddr >> 32);
725#endif

--- 4 unchanged lines hidden (view full) ---

730 desc32 = &sc->rxq.desc32[i];
731 desc32->physaddr = htole32(physaddr);
732 desc32->length = htole16(sc->rxq.bufsz);
733 desc32->flags = htole16(NFE_RX_READY);
734 }
735
736 }
737
738 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, BUS_DMASYNC_PREWRITE);
739
740 return 0;
741
742fail: nfe_free_rx_ring(sc, ring);
743
744 return error;
745}
746
747static int
748nfe_jpool_alloc(struct nfe_softc *sc)
749{
750 struct nfe_rx_ring *ring = &sc->rxq;
751 struct nfe_jbuf *jbuf;
752 bus_addr_t physaddr;
753 caddr_t buf;
754 int i, error;
755
756 /*
757 * Allocate a big chunk of DMA'able memory.
758 */
759 error = bus_dma_tag_create(sc->nfe_parent_tag,
760 PAGE_SIZE, 0, /* alignment, boundary */
761 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
762 BUS_SPACE_MAXADDR, /* highaddr */
763 NULL, NULL, /* filter, filterarg */
764 NFE_JPOOL_SIZE, 1, /* maxsize, nsegments */
765 NFE_JPOOL_SIZE, /* maxsegsize */
766 BUS_DMA_ALLOCNOW, /* flags */
767 NULL, NULL, /* lockfunc, lockarg */
768 &ring->rx_jumbo_tag);
769 if (error != 0) {
770 printf("nfe%d: could not create jumbo DMA tag\n", sc->nfe_unit);
771 goto fail;
772 }
773 error = bus_dmamem_alloc(ring->rx_jumbo_tag, (void **)&ring->jpool, BUS_DMA_NOWAIT, &ring->rx_jumbo_map);
774 if (error != 0) {
775 printf("nfe%d: could not create jumbo DMA memory\n", sc->nfe_unit);
776 goto fail;
777 }
778
779 error = bus_dmamap_load(ring->rx_jumbo_tag, ring->rx_jumbo_map, ring->jpool,
780 NFE_JPOOL_SIZE, nfe_dma_map_segs, &ring->rx_jumbo_segs, BUS_DMA_NOWAIT);
781 if (error != 0) {
782 printf("nfe%d: could not load jumbo DMA map\n", sc->nfe_unit);
783 goto fail;
784 }
785
786 /* ..and split it into 9KB chunks */
787 SLIST_INIT(&ring->jfreelist);
788

--- 22 unchanged lines hidden (view full) ---

811
812static void
813nfe_jpool_free(struct nfe_softc *sc)
814{
815 struct nfe_rx_ring *ring = &sc->rxq;
816
817 if (ring->jpool != NULL) {
818#if 0
819 bus_dmamem_unmap(ring->rx_jumbo_tag, ring->jpool, NFE_JPOOL_SIZE);
820#endif
821 bus_dmamem_free(ring->rx_jumbo_tag, &ring->rx_jumbo_segs, ring->rx_jumbo_map);
822 }
823 if (ring->rx_jumbo_map != NULL) {
824 bus_dmamap_sync(ring->rx_jumbo_tag, ring->rx_jumbo_map, BUS_DMASYNC_POSTWRITE);
825 bus_dmamap_unload(ring->rx_jumbo_tag, ring->rx_jumbo_map);
826 bus_dmamap_destroy(ring->rx_jumbo_tag, ring->rx_jumbo_map);
827 }
828}
829
830static struct nfe_jbuf *
831nfe_jalloc(struct nfe_softc *sc)
832{
833 struct nfe_jbuf *jbuf;
834
835 jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
836 if (jbuf == NULL)
837 return NULL;
838 SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
839 return jbuf;
840}
841
842/*
843 * This is called automatically by the network stack when the mbuf is freed.
844 * Caution must be taken that the NIC might be reset by the time the mbuf is
845 * freed.
846 */
847static void
848nfe_jfree(void *buf, void *arg)
849{
850 struct nfe_softc *sc = arg;
851 struct nfe_jbuf *jbuf;
852 int i;
853
854 /* find the jbuf from the base pointer */
855 i = ((vm_offset_t)buf - (vm_offset_t)sc->rxq.jpool) / NFE_JBYTES;
856 if (i < 0 || i >= NFE_JPOOL_COUNT) {
857 printf("nfe%d: request to free a buffer (%p) not managed by us\n", sc->nfe_unit, buf);
858 return;
859 }
860 jbuf = &sc->rxq.jbuf[i];
861
862 /* ..and put it back in the free list */
863 SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
864}
865

--- 8 unchanged lines hidden (view full) ---

874 ring->desc64[i].length = htole16(ring->bufsz);
875 ring->desc64[i].flags = htole16(NFE_RX_READY);
876 } else {
877 ring->desc32[i].length = htole16(ring->bufsz);
878 ring->desc32[i].flags = htole16(NFE_RX_READY);
879 }
880 }
881
882 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, BUS_DMASYNC_PREWRITE);
883
884 ring->cur = ring->next = 0;
885}
886
887
888static void
889nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
890{

--- 5 unchanged lines hidden (view full) ---

896 desc = ring->desc64;
897 descsize = sizeof (struct nfe_desc64);
898 } else {
899 desc = ring->desc32;
900 descsize = sizeof (struct nfe_desc32);
901 }
902
903 if (desc != NULL) {
904 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, BUS_DMASYNC_POSTWRITE);
905 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
906 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
907 bus_dma_tag_destroy(ring->rx_desc_tag);
908 }
909
910
911 if (sc->nfe_flags & NFE_USE_JUMBO) {
912 nfe_jpool_free(sc);
913 } else {
914 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
915 data = &ring->data[i];
916
917 if (data->rx_data_map != NULL) {
918 bus_dmamap_sync(data->rx_data_tag, data->rx_data_map, BUS_DMASYNC_POSTREAD);
919 bus_dmamap_unload(data->rx_data_tag, data->rx_data_map);
920 bus_dmamap_destroy(data->rx_data_tag, data->rx_data_map);
921 bus_dma_tag_destroy(data->rx_data_tag);
922 }
923 if (data->m != NULL)
924 m_freem(data->m);
925 }
926 }
927}
928
929static int
930nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
931{
932 int i, error;
933 void **desc;
934 int descsize;
935
936 if (sc->nfe_flags & NFE_40BIT_ADDR) {
937 desc = (void **)&ring->desc64;
938 descsize = sizeof (struct nfe_desc64);
939 } else {
940 desc = (void **)&ring->desc32;
941 descsize = sizeof (struct nfe_desc32);
942 }
943
944 ring->queued = 0;
945 ring->cur = ring->next = 0;
946
947 error = bus_dma_tag_create(sc->nfe_parent_tag,
948 PAGE_SIZE, 0, /* alignment, boundary */
949 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
950 BUS_SPACE_MAXADDR, /* highaddr */
951 NULL, NULL, /* filter, filterarg */
952 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
953 NFE_TX_RING_COUNT * descsize, /* maxsegsize */
954 BUS_DMA_ALLOCNOW, /* flags */
955 NULL, NULL, /* lockfunc, lockarg */
956 &ring->tx_desc_tag);
957 if (error != 0) {
958 printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit);
959 goto fail;
960 }
961
962 error = bus_dmamem_alloc(ring->tx_desc_tag, (void **)desc, BUS_DMA_NOWAIT, &ring->tx_desc_map);
963 if (error != 0) {
964 printf("nfe%d: could not create desc DMA map\n", sc->nfe_unit);
965 goto fail;
966 }
967
968 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, *desc,
969 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ring->tx_desc_segs, BUS_DMA_NOWAIT);
970 if (error != 0) {
971 printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit);
972 goto fail;
973 }
974
975 bzero(*desc, NFE_TX_RING_COUNT * descsize);
976
977 ring->tx_desc_addr = ring->tx_desc_segs.ds_addr;
978 ring->physaddr = ring->tx_desc_addr;
979
980 error = bus_dma_tag_create(sc->nfe_parent_tag,
981 ETHER_ALIGN, 0,
982 BUS_SPACE_MAXADDR_32BIT,
983 BUS_SPACE_MAXADDR,
984 NULL, NULL,
985 NFE_JBYTES, NFE_MAX_SCATTER,
986 NFE_JBYTES,
987 BUS_DMA_ALLOCNOW,
988 NULL, NULL,
989 &ring->tx_data_tag);
990 if (error != 0) {
991 printf("nfe%d: could not create DMA tag\n", sc->nfe_unit);
992 goto fail;
993 }
994
995 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
996 error = bus_dmamap_create(ring->tx_data_tag, 0, &ring->data[i].tx_data_map);
997 if (error != 0) {
998 printf("nfe%d: could not create DMA map\n", sc->nfe_unit);
999 goto fail;
1000 }
1001 }
1002
1003 return 0;
1004
1005fail: nfe_free_tx_ring(sc, ring);
1006 return error;

--- 10 unchanged lines hidden (view full) ---

1017 if (sc->nfe_flags & NFE_40BIT_ADDR)
1018 ring->desc64[i].flags = 0;
1019 else
1020 ring->desc32[i].flags = 0;
1021
1022 data = &ring->data[i];
1023
1024 if (data->m != NULL) {
1025 bus_dmamap_sync(ring->tx_data_tag, data->active, BUS_DMASYNC_POSTWRITE);
1026 bus_dmamap_unload(ring->tx_data_tag, data->active);
1027 m_freem(data->m);
1028 data->m = NULL;
1029 }
1030 }
1031
1032 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, BUS_DMASYNC_PREWRITE);
1033
1034 ring->queued = 0;
1035 ring->cur = ring->next = 0;
1036}
1037
/*
 * Tear down the Tx ring: unload and free the descriptor DMA memory, free
 * any mbufs still queued, then destroy the per-slot data maps and the data
 * tag.  Mbuf unloading and map destruction are done in two separate passes
 * so every in-flight map is unloaded before any map is destroyed.
 */
static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	/*
	 * Select the descriptor format.  NOTE(review): descsize is computed
	 * here but never used below -- kept only for symmetry with
	 * nfe_alloc_tx_ring().
	 */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	/* Release the descriptor ring memory and its tag, if allocated. */
	if (desc != NULL) {
		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
		bus_dma_tag_destroy(ring->tx_desc_tag);
	}

	/* Unload and free any mbufs still sitting in the ring. */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			/* data->active is the map last loaded for this mbuf chain. */
			bus_dmamap_sync(ring->tx_data_tag, data->active, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->tx_data_tag, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->tx_data_map == NULL)
			continue;
		bus_dmamap_destroy(ring->tx_data_tag, data->tx_data_map);
	}

	bus_dma_tag_destroy(ring->tx_data_tag);
}
1080
1081#ifdef DEVICE_POLLING
1082static poll_handler_t nfe_poll;
1083
1084static void
1085nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1086{
1087 struct nfe_softc *sc = ifp->if_softc;
1088
1089 NFE_LOCK(sc);
1090 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1091 nfe_poll_locked(ifp, cmd, count);

--- 15 unchanged lines hidden (view full) ---

1107
1108 sc->rxcycles = count;
1109 nfe_rxeof(sc);
1110 nfe_txeof(sc);
1111 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1112 nfe_start_locked(ifp);
1113
1114 if (cmd == POLL_AND_CHECK_STATUS) {
1115 if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
1116 return;
1117 }
1118 NFE_WRITE(sc, NFE_IRQ_STATUS, r);
1119
1120 if (r & NFE_IRQ_LINK) {
1121 NFE_READ(sc, NFE_PHY_STATUS);
1122 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1123 DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit));
1124 }
1125 }
1126}
1127#endif /* DEVICE_POLLING */
1128
1129
1130static int
1131nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1132{
1133 int error = 0;
1134 struct nfe_softc *sc = ifp->if_softc;
1135 struct ifreq *ifr = (struct ifreq *) data;
1136 struct mii_data *mii;
1137
1138 switch (cmd) {
1139 case SIOCSIFMTU:
1140 if (ifr->ifr_mtu < ETHERMIN ||
1141 ((sc->nfe_flags & NFE_USE_JUMBO) &&
1142 ifr->ifr_mtu > ETHERMTU_JUMBO) ||
1143 (!(sc->nfe_flags & NFE_USE_JUMBO) &&
1144 ifr->ifr_mtu > ETHERMTU))
1145 error = EINVAL;
1146 else if (ifp->if_mtu != ifr->ifr_mtu) {
1147 ifp->if_mtu = ifr->ifr_mtu;
1148 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1149 nfe_init(sc);
1150 }
1151 break;
1152 case SIOCSIFFLAGS:
1153 NFE_LOCK(sc);
1154 if (ifp->if_flags & IFF_UP) {

--- 26 unchanged lines hidden (view full) ---

1181 }
1182 break;
1183 case SIOCSIFMEDIA:
1184 case SIOCGIFMEDIA:
1185 mii = device_get_softc(sc->nfe_miibus);
1186 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1187 break;
1188 case SIOCSIFCAP:
1189 {
1190 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1191#ifdef DEVICE_POLLING
1192 if (mask & IFCAP_POLLING) {
1193 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1194 error = ether_poll_register(nfe_poll, ifp);
1195 if (error)
1196 return(error);
1197 NFE_LOCK(sc);
1198 NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1199 ifp->if_capenable |= IFCAP_POLLING;
1200 NFE_UNLOCK(sc);
1201 } else {
1202 error = ether_poll_deregister(ifp);
1203 /* Enable interrupt even in error case */
1204 NFE_LOCK(sc);
1205 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1206 ifp->if_capenable &= ~IFCAP_POLLING;
1207 NFE_UNLOCK(sc);
1208 }
1209 }
1210#endif
1211 if (mask & IFCAP_HWCSUM) {
1212 ifp->if_capenable ^= IFCAP_HWCSUM;
1213 if (IFCAP_HWCSUM & ifp->if_capenable &&
1214 IFCAP_HWCSUM & ifp->if_capabilities)
1215 ifp->if_hwassist = NFE_CSUM_FEATURES;
1216 else
1217 ifp->if_hwassist = 0;
1218 }
1219 }
1220 break;
1221
1222 default:
1223 error = ether_ioctl(ifp, cmd, data);
1224 break;
1225 }
1226
1227 return error;
1228}
1229
1230
1231static void nfe_intr(void *arg)
1232{
1233 struct nfe_softc *sc = arg;
1234 struct ifnet *ifp = sc->nfe_ifp;
1235 u_int32_t r;
1236
1237 NFE_LOCK(sc);
1238
1239#ifdef DEVICE_POLLING
1240 if (ifp->if_capenable & IFCAP_POLLING) {
1241 NFE_UNLOCK(sc);
1242 return;
1243 }
1244#endif
1245
1246 if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
1247 NFE_UNLOCK(sc);
1248 return; /* not for us */
1249 }
1250 NFE_WRITE(sc, NFE_IRQ_STATUS, r);
1251
1252 DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));
1253
1254 NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1255

--- 5 unchanged lines hidden (view full) ---

1261
1262 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1263 /* check Rx ring */
1264 nfe_rxeof(sc);
1265 /* check Tx ring */
1266 nfe_txeof(sc);
1267 }
1268
1269 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1270
1271 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1272 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1273 nfe_start_locked(ifp);
1274
1275 NFE_UNLOCK(sc);
1276
1277 return;
1278}
1279
/*
 * Sync the whole Tx descriptor ring map.  The desc32 argument is unused --
 * the entire ring is synced regardless; the parameter appears kept for
 * call-site symmetry with the other desc sync helpers (interface inherited
 * from the OpenBSD driver this was ported from).
 */
static void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
}
1285
/* 64-bit variant: syncs the whole Tx descriptor ring (desc64 unused). */
static void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
}
1291
/*
 * Range sync of the Tx ring (32-bit descs).  start/end are ignored; the
 * whole ring map is synced.
 */
static void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
}
1297
/*
 * Range sync of the Tx ring (64-bit descs).  start/end are ignored; the
 * whole ring map is synced.
 */
static void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
}
1303
/* Sync the whole Rx descriptor ring map (desc32 unused). */
static void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
}
1309
/* Sync the whole Rx descriptor ring map (desc64 unused). */
static void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{

	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
}
1316
1317static void nfe_rxeof(struct nfe_softc *sc)
1318{
1319 struct ifnet *ifp = sc->nfe_ifp;
1320 struct nfe_desc32 *desc32=NULL;
1321 struct nfe_desc64 *desc64=NULL;
1322 struct nfe_rx_data *data;
1323 struct nfe_jbuf *jbuf;
1324 struct mbuf *m, *mnew;
1325 bus_addr_t physaddr;

--- 143 unchanged lines hidden (view full) ---

1469 if ((sc->nfe_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK)) {
1470 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1471 if (flags & NFE_RX_IP_CSUMOK_V2) {
1472 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1473 }
1474 if (flags & NFE_RX_UDP_CSUMOK_V2 ||
1475 flags & NFE_RX_TCP_CSUMOK_V2) {
1476 m->m_pkthdr.csum_flags |=
1477 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1478 m->m_pkthdr.csum_data = 0xffff;
1479 }
1480 }
1481#endif
1482
1483#if NVLAN > 1
1484 if (have_tag) {
1485 m->m_pkthdr.ether_vtag = vlan_tag;

--- 28 unchanged lines hidden (view full) ---

1514
1515 nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
1516 }
1517
1518 sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
1519 }
1520}
1521
1522static void nfe_txeof(struct nfe_softc *sc)
1523{
1524 struct ifnet *ifp = sc->nfe_ifp;
1525 struct nfe_desc32 *desc32;
1526 struct nfe_desc64 *desc64;
1527 struct nfe_tx_data *data = NULL;
1528 u_int16_t flags;
1529
1530 NFE_LOCK_ASSERT(sc);

--- 17 unchanged lines hidden (view full) ---

1548 data = &sc->txq.data[sc->txq.next];
1549
1550 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1551 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
1552 goto skip;
1553
1554 if ((flags & NFE_TX_ERROR_V1) != 0) {
1555 printf("nfe%d: tx v1 error 0x%4b\n",
1556 sc->nfe_unit, flags, NFE_V1_TXERR);
1557
1558 ifp->if_oerrors++;
1559 } else
1560 ifp->if_opackets++;
1561 } else {
1562 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
1563 goto skip;
1564
1565 if ((flags & NFE_TX_ERROR_V2) != 0) {
1566 printf("nfe%d: tx v1 error 0x%4b\n",
1567 sc->nfe_unit, flags, NFE_V2_TXERR);
1568
1569 ifp->if_oerrors++;
1570 } else
1571 ifp->if_opackets++;
1572 }
1573
1574 if (data->m == NULL) { /* should not get there */
1575 printf("nfe%d: last fragment bit w/o associated mbuf!\n",
1576 sc->nfe_unit);
1577 goto skip;
1578 }
1579
1580 /* last fragment of the mbuf chain transmitted */
1581 bus_dmamap_sync(sc->txq.tx_data_tag, data->active,
1582 BUS_DMASYNC_POSTWRITE);
1583 bus_dmamap_unload(sc->txq.tx_data_tag, data->active);

--- 7 unchanged lines hidden (view full) ---

1591 }
1592
1593 if (data != NULL) { /* at least one slot freed */
1594 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1595 nfe_start_locked(ifp);
1596 }
1597}
1598
1599static int nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
1600{
1601 struct nfe_desc32 *desc32=NULL;
1602 struct nfe_desc64 *desc64=NULL;
1603 struct nfe_tx_data *data=NULL;
1604 bus_dmamap_t map;
1605 u_int16_t flags = NFE_TX_VALID;
1606 bus_dma_segment_t segs[NFE_MAX_SCATTER];
1607 int nsegs;
1608 int error, i;
1609
1610 map = sc->txq.data[sc->txq.cur].tx_data_map;
1611
1612 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, m0, segs,
1613 &nsegs, BUS_DMA_NOWAIT);
1614
1615 if (error != 0) {
1616 printf("nfe%d: could not map mbuf (error %d)\n", sc->nfe_unit,

--- 40 unchanged lines hidden (view full) ---

1657 desc32->length = htole16(segs[i].ds_len - 1);
1658 desc32->flags = htole16(flags);
1659 }
1660
1661 /* csum flags and vtag belong to the first fragment only */
1662 if (nsegs > 1) {
1663 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
1664 }
1665
1666 sc->txq.queued++;
1667 sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
1668 }
1669
1670 /* the whole mbuf chain has been DMA mapped, fix last descriptor */
1671 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1672 flags |= NFE_TX_LASTFRAG_V2;
1673 desc64->flags = htole16(flags);

--- 10 unchanged lines hidden (view full) ---

1684 data->nsegs = nsegs;
1685
1686 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
1687
1688 return 0;
1689}
1690
1691
1692static void nfe_setmulti(struct nfe_softc *sc)
1693{
1694 struct ifnet *ifp = sc->nfe_ifp;
1695 struct ifmultiaddr *ifma;
1696 u_int8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1697 u_int32_t filter = NFE_RXFILTER_MAGIC;
1698 u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
1699 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1700 int i;
1701
1702 NFE_LOCK_ASSERT(sc);
1703
1704 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1705 bzero(addr, ETHER_ADDR_LEN);
1706 bzero(mask, ETHER_ADDR_LEN);
1707 goto done;
1708 }

--- 32 unchanged lines hidden (view full) ---

1741 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1742 NFE_WRITE(sc, NFE_MULTIMASK_LO,
1743 mask[5] << 8 | mask[4]);
1744
1745 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
1746 NFE_WRITE(sc, NFE_RXFILTER, filter);
1747}
1748
1749static void nfe_start(struct ifnet *ifp)
1750{
1751 struct nfe_softc *sc;
1752
1753 sc = ifp->if_softc;
1754 NFE_LOCK(sc);
1755 nfe_start_locked(ifp);
1756 NFE_UNLOCK(sc);
1757}
1758
1759static void nfe_start_locked(struct ifnet *ifp)
1760{
1761 struct nfe_softc *sc = ifp->if_softc;
1762 int old = sc->txq.cur;
1763 struct mbuf *m0;
1764
1765 if (!sc->nfe_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1766 return;
1767 }
1768
1769 for (;;) {
1770 IFQ_POLL(&ifp->if_snd, m0);
1771 if (m0 == NULL)

--- 24 unchanged lines hidden (view full) ---

1796 /*
1797 * Set a timeout in case the chip goes out to lunch.
1798 */
1799 ifp->if_timer = 5;
1800
1801 return;
1802}
1803
1804static void nfe_watchdog(struct ifnet *ifp)
1805{
1806 struct nfe_softc *sc = ifp->if_softc;
1807
1808 printf("nfe%d: watchdog timeout\n", sc->nfe_unit);
1809
1810 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1811 nfe_init(sc);
1812
1813 ifp->if_oerrors++;
1814
1815 return;
1816}
1817
/*
 * Unlocked wrapper around nfe_init_locked(); usable as an if_init
 * callback (hence the void * argument).
 */
static void nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;

	NFE_LOCK(sc);
	nfe_init_locked(sc);
	NFE_UNLOCK(sc);
}
1828
1829static void nfe_init_locked(void *xsc)
1830{
1831 struct nfe_softc *sc = xsc;
1832 struct ifnet *ifp = sc->nfe_ifp;
1833 struct mii_data *mii;
1834 u_int32_t tmp;
1835
1836 NFE_LOCK_ASSERT(sc);
1837

--- 113 unchanged lines hidden (view full) ---

1951 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1952 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1953
1954 sc->nfe_link = 0;
1955
1956 return;
1957}
1958
1959static void nfe_stop(struct ifnet *ifp, int disable)
1960{
1961 struct nfe_softc *sc = ifp->if_softc;
1962 struct mii_data *mii;
1963
1964 NFE_LOCK_ASSERT(sc);
1965
1966 ifp->if_timer = 0;
1967 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

--- 15 unchanged lines hidden (view full) ---

1983
1984 /* reset Tx and Rx rings */
1985 nfe_reset_tx_ring(sc, &sc->txq);
1986 nfe_reset_rx_ring(sc, &sc->rxq);
1987
1988 return;
1989}
1990
1991static int nfe_ifmedia_upd(struct ifnet *ifp)
1992{
1993 struct nfe_softc *sc = ifp->if_softc;
1994
1995 NFE_LOCK(sc);
1996 nfe_ifmedia_upd_locked(ifp);
1997 NFE_UNLOCK(sc);
1998 return (0);
1999}
2000
2001static int nfe_ifmedia_upd_locked(struct ifnet *ifp)
2002{
2003 struct nfe_softc *sc = ifp->if_softc;
2004 struct mii_data *mii;
2005
2006 NFE_LOCK_ASSERT(sc);
2007
2008 mii = device_get_softc(sc->nfe_miibus);
2009
2010 if (mii->mii_instance) {
2011 struct mii_softc *miisc;
2012 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2013 miisc = LIST_NEXT(miisc, mii_list)) {
2014 mii_phy_reset(miisc);
2015 }
2016 }
2017 mii_mediachg(mii);
2018
2019 return (0);
2020}
2021
2022static void nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2023{
2024 struct nfe_softc *sc;
2025 struct mii_data *mii;
2026
2027 sc = ifp->if_softc;
2028
2029 NFE_LOCK(sc);
2030 mii = device_get_softc(sc->nfe_miibus);
2031 mii_pollstat(mii);
2032 NFE_UNLOCK(sc);
2033
2034 ifmr->ifm_active = mii->mii_media_active;
2035 ifmr->ifm_status = mii->mii_media_status;
2036
2037 return;
2038}
2039
/*
 * Periodic callout handler; grabs the softc lock and runs the real
 * per-second work in nfe_tick_locked().
 */
static void
nfe_tick(void *xsc)
{
	struct nfe_softc *sc = xsc;

	NFE_LOCK(sc);
	nfe_tick_locked(sc);
	NFE_UNLOCK(sc);
}
2051
2052
2053void nfe_tick_locked(struct nfe_softc *arg)
2054{
2055 struct nfe_softc *sc;
2056 struct mii_data *mii;
2057 struct ifnet *ifp;
2058
2059 sc = arg;
2060
2061 NFE_LOCK_ASSERT(sc);
2062
2063 ifp = sc->nfe_ifp;
2064
2065 mii = device_get_softc(sc->nfe_miibus);
2066 mii_tick(mii);
2067
2068 if (!sc->nfe_link) {
2069 if (mii->mii_media_status & IFM_ACTIVE &&

--- 7 unchanged lines hidden (view full) ---

2077 }
2078 }
2079 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2080
2081 return;
2082}
2083
2084
2085static void nfe_shutdown(device_t dev)
2086{
2087 struct nfe_softc *sc;
2088 struct ifnet *ifp;
2089
2090 sc = device_get_softc(dev);
2091
2092 NFE_LOCK(sc);
2093 ifp = sc->nfe_ifp;
2094 nfe_stop(ifp,0);
2095 /* nfe_reset(sc); */
2096 NFE_UNLOCK(sc);
2097
2098 return;
2099}
2100
2101
2102static void nfe_get_macaddr(struct nfe_softc *sc, u_char *addr)
2103{
2104 uint32_t tmp;
2105
2106 tmp = NFE_READ(sc, NFE_MACADDR_LO);
2107 addr[0] = (tmp >> 8) & 0xff;
2108 addr[1] = (tmp & 0xff);
2109
2110 tmp = NFE_READ(sc, NFE_MACADDR_HI);
2111 addr[2] = (tmp >> 24) & 0xff;
2112 addr[3] = (tmp >> 16) & 0xff;
2113 addr[4] = (tmp >> 8) & 0xff;
2114 addr[5] = (tmp & 0xff);
2115}
2116
2117static void nfe_set_macaddr(struct nfe_softc *sc, u_char *addr)
2118{
2119
2120 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
2121 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2122 addr[1] << 8 | addr[0]);
2123}
2124
2125/*
2126 * Map a single buffer address.
2127 */
2128
2129static void
2130nfe_dma_map_segs(arg, segs, nseg, error)
2131 void *arg;
2132 bus_dma_segment_t *segs;

--- 12 unchanged lines hidden ---