/*	$OpenBSD: if_nfe.c,v 1.9 2006/01/22 21:35:08 damien Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_mediachange(struct ifnet *);
void	nfe_mediastatus(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_update_promisc(struct nfe_softc *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#define NFE_DEBUG

#ifdef NFE_DEBUG
int nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

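/*
 * Autoconf attach: map the PCI registers, establish the interrupt, read
 * the station address, set per-chip capability flags, allocate the Tx and
 * Rx rings and attach the network interface and PHY.
 */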
void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;

	if (pci_mapreg_map(pa, NFE_PCI_BA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &memsize, 0) != 0) {
		printf(": can't map mem space\n");
		return;
	}

	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = 1000000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_mediachange,
	    nfe_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

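/*
 * Power hook: on resume, reinitialize the interface if it was up and kick
 * the transmit queue.
 */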
void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

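/*
 * MII status change callback: record the active media type in the
 * NFE_PHY_INT register (the exact hardware semantics of this register are
 * undocumented; this mirrors what the chip appears to expect).
 */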
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t reg;

	reg = NFE_READ(sc, NFE_PHY_INT);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		reg |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		reg |= NFE_PHY_100TX;
		break;
	}

	NFE_WRITE(sc, NFE_PHY_INT, reg);
}

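/*
 * Read a PHY register through the MII management interface.  Returns 0 on
 * timeout or error, which the MII layer treats as an absent register.
 */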
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("timeout waiting for PHY\n"));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("could not read PHY\n"));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val));

	return val;
}

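/*
 * Write a PHY register through the MII management interface.
 */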
void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

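/*
 * Interrupt handler: mask and acknowledge the chip interrupts, service the
 * Rx and Tx rings, then re-enable interrupts.  Returns 1 if the interrupt
 * was ours.
 */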
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	if (r == 0) {
		/* re-enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		return 0;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	DPRINTF(("nfe_intr: interrupt register %x\n", r));

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	return 1;
}

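/*
 * Handle interface ioctls: addresses, MTU, flags, multicast filter and
 * media selection.
 */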
int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			nfe_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			nfe_init(ifp);
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_update_promisc(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}

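/*
 * Helpers to bus_dmamap_sync() a single Tx or Rx descriptor; the offset
 * within the ring map is computed from the descriptor's address.
 */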
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

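/*
 * Rx interrupt handler: walk the Rx ring, pass completed frames to the
 * network stack and give the descriptors back to the chip with fresh mbuf
 * clusters.
 */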
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m, *mnew;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		MCLGET(mnew, M_DONTWAIT);
		if (!(mnew->m_flags & M_EXT)) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(mnew, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail... */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#else
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->flags = htole16(NFE_RX_READY);
			desc64->length = htole16(MCLBYTES);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->flags = htole16(NFE_RX_READY);
			desc32->length = htole16(MCLBYTES);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

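/*
 * Tx interrupt handler: reclaim descriptors of completed transmissions,
 * free the associated mbufs and restart output.
 */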
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	uint16_t flags;

	/* XXX: should limit # iterations to NFE_TX_RING_COUNT */
	for (;;) {
		data = &sc->txq.data[sc->txq.next];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (!(flags & NFE_TX_VALID))
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1))
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2))
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			DPRINTF(("last fragment bit w/o associated mbuf!\n"));
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	nfe_start(ifp);
}

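/*
 * Map an outgoing mbuf chain for DMA and fill one Tx descriptor per
 * segment.  Chains with too many fragments for the DMA map are copied
 * into a single mbuf (cluster) first.
 */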
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	struct mbuf *mnew;
	bus_dmamap_t map;
	uint32_t txctl = NFE_RXTX_KICKTX;
	uint16_t flags = NFE_TX_VALID;
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(m0);
		return error;
	}
	if (error != 0) {
		/* too many fragments, linearize */

		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			m_freem(m0);
			return ENOMEM;
		}

		M_DUP_PKTHDR(mnew, m0);
		if (m0->m_pkthdr.len > MHLEN) {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(m0);
				m_freem(mnew);
				return ENOMEM;
			}
		}

		m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mnew, caddr_t));
		m_freem(m0);
		mnew->m_len = mnew->m_pkthdr.len;
		m0 = mnew;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			return error;
		}
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	/* h/w checksum (XXX only if HW_CSUM?) */
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);

			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);

			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		/* csum flags belong to the first fragment only */
		if (map->dm_nsegs > 1)
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		txctl |= NFE_RXTX_V3MAGIC;
		flags |= NFE_TX_LASTFRAG_V2;

		desc64->flags = htole16(flags);
		nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP) {
			txctl |= NFE_RXTX_V2MAGIC;
			flags |= NFE_TX_LASTFRAG_V2;
		} else
			flags |= NFE_TX_LASTFRAG_V1;

		desc32->flags = htole16(flags);
		nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
	}

	if (sc->sc_flags & NFE_HW_CSUM)
		txctl |= NFE_RXTX_RXCHECK;

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, txctl);

	return 0;
}

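/*
 * Start output: feed packets from the interface send queue to nfe_encap()
 * until the Tx ring is full.
 */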
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/* start watchdog timer */
		ifp->if_timer = 5;
	}
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

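/*
 * Initialize the hardware: reset the Tx/Rx engine, program the MAC address
 * and ring addresses, enable Rx, Tx and interrupts, and arm the MII tick
 * timeout.
 */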
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t rxtxctl;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		rxtxctl |= NFE_RXTX_RXCHECK;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr);
	NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_TIMER_INT, 970);	/* XXX Magic */

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | rxtxctl);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	nfe_setmulti(sc);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	mii_mediachg(&sc->sc_mii);

	timeout_set(&sc->sc_timeout, nfe_tick, sc);
	timeout_add(&sc->sc_timeout, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

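/*
 * Stop the interface: cancel the tick timeout, disable Rx, Tx and
 * interrupts, and reset both rings.
 */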
void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

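/*
 * Allocate DMA-safe memory for the Rx descriptor ring and pre-allocate an
 * mbuf cluster for each ring slot.
 */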
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	void **desc;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->length = htole16(MCLBYTES);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->length = htole16(MCLBYTES);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(MCLBYTES);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(MCLBYTES);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

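/*
 * Allocate DMA-safe memory for the Tx descriptor ring and create a DMA map
 * for each ring slot.
 */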
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

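/*
 * ifmedia change callback: program the link speed register to match the
 * currently active media.
 */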
int
nfe_mediachange(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data	*mii = &sc->sc_mii;
	uint32_t val;

	DPRINTF(("nfe_mediachange\n"));
#if 0
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		/* XXX? */
	else
#endif
		val = 0;

	val |= NFE_MEDIA_SET;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		val |= NFE_MEDIA_1000T;
		break;
	case IFM_100_TX:
		val |= NFE_MEDIA_100TX;
		break;
	case IFM_10_T:
		val |= NFE_MEDIA_10T;
		break;
	}

	DPRINTF(("nfe_mediachange: val=0x%x\n", val));
	NFE_WRITE(sc, NFE_LINKSPEED, val);

	return 0;
}

void
nfe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

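/*
 * Program the multicast filter.  For now the chip is simply put in an
 * accept-all mode rather than being given a real hash filter; see the
 * "notyet" alternative below.
 */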
void
nfe_setmulti(struct nfe_softc *sc)
{
	NFE_WRITE(sc, NFE_MULT_ADDR1, 0x01);
	NFE_WRITE(sc, NFE_MULT_ADDR2, 0);
	NFE_WRITE(sc, NFE_MULT_MASK1, 0);
	NFE_WRITE(sc, NFE_MULT_MASK2, 0);
#ifdef notyet
	NFE_WRITE(sc, NFE_MULTI_FLAGS, NFE_MC_ALWAYS | NFE_MC_MYADDR);
#else
	NFE_WRITE(sc, NFE_MULTI_FLAGS, NFE_MC_ALWAYS | NFE_MC_PROMISC);
#endif
}

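/*
 * Read the factory-programmed station address.  Note that the registers
 * hold it in reversed byte order compared to what nfe_set_macaddr()
 * programs.
 */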
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >>  8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_update_promisc(struct nfe_softc *sc)
{
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_timeout, hz);
}