/*	$OpenBSD: if_nfe.c,v 1.8 2006/01/20 22:02:03 brad Exp $	*/
/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for nvidia nForce Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);

int	nfe_intr(void *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);

void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);

int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_start(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_update_promisc(struct nfe_softc *);
void	nfe_tick(void *);

int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_mediachange(struct ifnet *);
void	nfe_mediastatus(struct ifnet *, struct ifmediareq *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc),
	nfe_match,
	nfe_attach
};

struct cfdriver nfe_cd = {
	0, "nfe", DV_IFNET
};


#ifdef NFE_DEBUG
int nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, NFE_PCI_BA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &memsize, 0) != 0) {
		printf(": can't map mem space\n");
		return;
	}

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = 1000000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/* Set interface name */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	/* XXX always seem to get a ghost ukphy along with eephy on nf4u */
	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_mediachange,
	    nfe_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* XXX powerhook */
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data	*mii = &sc->sc_mii;
	uint32_t reg;

	reg = NFE_READ(sc, NFE_PHY_INT);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		reg |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		reg |= NFE_PHY_100TX;
		break;
	}

	NFE_WRITE(sc, NFE_PHY_INT, reg);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t r;

	r = NFE_READ(sc, NFE_PHY_CTL);
	if (r & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		delay(100);
	}

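	/* select the PHY and register; the result shows up in NFE_PHY_DATA */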
	NFE_WRITE(sc, NFE_PHY_CTL, reg | (phy << NFE_PHYADD_SHIFT));
	delay(1000);
	r = NFE_READ(sc, NFE_PHY_DATA);
	if (r != 0xffffffff && r != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("nfe mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, r));

	return r;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t r;

	r = NFE_READ(sc, NFE_PHY_CTL);
	if (r & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		delay(100);
	}

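	/* latch the data first, then issue the write command for this PHY/register */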
	NFE_WRITE(sc, NFE_PHY_DATA, data);
	r = reg | (phy << NFE_PHYADD_SHIFT) | NFE_PHY_WRITE;
	NFE_WRITE(sc, NFE_PHY_CTL, r);
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

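	/* fetch the pending interrupt sources and write them back to acknowledge them */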
	r = NFE_READ(sc, NFE_IRQ_STATUS);
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	if (r == 0) {
		/* re-enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		return 0;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	DPRINTF(("nfe_intr: interrupt register %x", r));

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			nfe_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			nfe_init(ifp);
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_update_promisc(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}

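/*
 * Sync a single ring descriptor with the device; the byte offset into the
 * DMA map is the descriptor's offset from the start of the descriptor array.
 */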
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m, *mnew;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {	/* const condition */
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

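		/* NFE_RX_READY still set means the chip has not filled this descriptor yet */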
		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;
			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;
			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		MCLGET(mnew, M_DONTWAIT);
		if (!(mnew->m_flags & M_EXT)) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(mnew, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail... */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {	/* const condition */
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->flags = htole16(NFE_RX_READY);
			desc64->length = htole16(MCLBYTES);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->flags = htole16(NFE_RX_READY);
			desc32->length = htole16(MCLBYTES);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	uint16_t flags;

	/* XXX: should limit # iterations to NFE_TX_RING_COUNT */
	for (;;) {
		data = &sc->txq.data[sc->txq.next];

		if (sc->sc_flags & NFE_40BIT_ADDR) {	/* const condition */
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (!(flags & NFE_TX_VALID))
			break;

		if (data->m == NULL)
			goto skip;	/* skip intermediate fragments */

		if (flags & NFE_TX_ERROR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	nfe_start(ifp);
}

int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	struct mbuf *mnew;
	bus_dmamap_t map;
	uint32_t txctl = NFE_RXTX_KICKTX;
	uint16_t flags = NFE_TX_VALID;
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(m0);
		return error;
	}
	if (error != 0) {
		/* too many fragments, linearize */

		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			m_freem(m0);
			return ENOMEM;
		}

		M_DUP_PKTHDR(mnew, m0);
		if (m0->m_pkthdr.len > MHLEN) {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(m0);
				m_freem(mnew);
				return ENOMEM;
			}
		}

		m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mnew, caddr_t));
		m_freem(m0);
		mnew->m_len = mnew->m_pkthdr.len;
		m0 = mnew;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			return error;
		}
	}

	/* h/w checksum (XXX only if HW_CSUM?) */
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {

		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {	/* const condition */
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);

			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);

			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		/* csum flags belong to the first fragment only */
		if (map->dm_nsegs > 1)
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		txctl |= NFE_RXTX_V3MAGIC;
		flags |= NFE_TX_LASTFRAG_V2;

		desc64->flags = htole16(flags);
		nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP) {
			txctl |= NFE_RXTX_V2MAGIC;
			flags |= NFE_TX_LASTFRAG_V2;
		} else
			flags |= NFE_TX_LASTFRAG_V1;

		desc32->flags = htole16(flags);
		nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
	}

	if (sc->sc_flags & NFE_HW_CSUM)
		txctl |= NFE_RXTX_RXCHECK;

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, txctl);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;

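	/*
	 * Poll the packet first and only dequeue it once it has been
	 * accepted by nfe_encap(), so a failed encapsulation leaves it
	 * on the send queue for a later attempt.
	 */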
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/* start watchdog timer */
		ifp->if_timer = 5;
	}
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t rxtxctl;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		rxtxctl |= NFE_RXTX_RXCHECK;

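	/* reset the Rx/Tx logic, then restore the descriptor format bits */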
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	delay(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr);
	NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_TIMER_INT, 970);		/* XXX Magic */

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
	delay(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | rxtxctl);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	nfe_setmulti(sc);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	mii_mediachg(&sc->sc_mii);

	timeout_set(&sc->sc_timeout, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	void **desc;
	int i, nsegs, error, descsize;

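	/* chips with 40-bit DMA addressing use the larger 64-bit descriptor format */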
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->length = htole16(MCLBYTES);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->length = htole16(MCLBYTES);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(MCLBYTES);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(MCLBYTES);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_mediachange(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data	*mii = &sc->sc_mii;
	uint32_t val;

	DPRINTF(("nfe_mediachange\n"));
#if 0
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		/* XXX? */
	else
#endif
		val = 0;

	val |= NFE_MEDIA_SET;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		val |= NFE_MEDIA_1000T;
		break;
	case IFM_100_TX:
		val |= NFE_MEDIA_100TX;
		break;
	case IFM_10_T:
		val |= NFE_MEDIA_10T;
		break;
	}

	DPRINTF(("nfe_mediachange: val=0x%x\n", val));
	NFE_WRITE(sc, NFE_LINKSPEED, val);

	return 0;
}

void
nfe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

void
nfe_setmulti(struct nfe_softc *sc)
{
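	/* XXX no real multicast filtering yet; program a catch-all filter */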
	NFE_WRITE(sc, NFE_MULT_ADDR1, 0x01);
	NFE_WRITE(sc, NFE_MULT_ADDR2, 0);
	NFE_WRITE(sc, NFE_MULT_MASK1, 0);
	NFE_WRITE(sc, NFE_MULT_MASK2, 0);
#ifdef notyet
	NFE_WRITE(sc, NFE_MULTI_FLAGS, NFE_MC_ALWAYS | NFE_MC_MYADDR);
#else
	NFE_WRITE(sc, NFE_MULTI_FLAGS, NFE_MC_ALWAYS | NFE_MC_PROMISC);
#endif
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

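	/* bytes 0-1 of the MAC address are in the LO register, bytes 2-5 in HI */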
	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >>  8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[0] <<  8 | addr[1]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5]);
}

void
nfe_update_promisc(struct nfe_softc *sc)
{
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_timeout, hz);
}