/*	$OpenBSD: if_nfe.c,v 1.19 2006/02/05 23:37:21 brad Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;

	if (pci_mapreg_map(pa, NFE_PCI_BA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &memsize, 0) != 0) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities = IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

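/*
 * Power management hook.  On resume, reinitialize and restart the
 * interface if it was up when the machine suspended.
 */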
void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			ifp->if_flags &= ~IFF_RUNNING;
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

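/*
 * Bring the MAC in line with the speed and duplex negotiated by the PHY:
 * program the PHY interface, random seed, misc and link speed registers
 * according to the currently active media.
 */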
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy  |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

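/*
 * Read a PHY register through the MII management interface: cancel any
 * pending operation, issue the read through NFE_PHY_CTL and poll until
 * the busy bit clears, then fetch the result from NFE_PHY_DATA.
 */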
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("timeout waiting for PHY\n"));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("could not read PHY\n"));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

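/*
 * Interrupt handler: mask interrupts, acknowledge the pending status
 * bits, service the Rx and Tx rings, then unmask.  Returns 1 when an
 * interrupt was claimed, 0 otherwise.
 */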
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r == 0) {
		/* re-enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		return 0;
	}

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("link state changed\n"));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

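/*
 * Receive completion: walk the Rx ring and hand completed frames to the
 * network stack, replacing each frame's mbuf with a freshly allocated
 * cluster before the descriptor is returned to the hardware (the frame
 * is dropped and the old mbuf recycled if that allocation fails).
 */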
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m, *mnew;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		MCLGET(mnew, M_DONTWAIT);
		if (!(mnew->m_flags & M_EXT)) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(mnew, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail... */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#else
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->flags = htole16(NFE_RX_READY);
			desc64->length = htole16(MCLBYTES);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->flags = htole16(NFE_RX_READY);
			desc32->length = htole16(MCLBYTES);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

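/*
 * Transmit completion: reclaim descriptors for frames the hardware has
 * finished sending, free the associated mbufs and restart output.
 */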
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		data = &sc->txq.data[sc->txq.next];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1))
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2))
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get here */
			DPRINTF(("last fragment bit w/o associated mbuf!\n"));
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	nfe_start(ifp);
}

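/*
 * Map an mbuf chain for transmission and fill one Tx descriptor per DMA
 * segment.  If the chain has too many fragments, it is first copied into
 * a single mbuf (or cluster) and mapped again.
 */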
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	struct mbuf *mnew;
	bus_dmamap_t map;
	uint32_t txctl = NFE_RXTX_KICKTX;
	uint16_t flags = NFE_TX_VALID;
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(m0);
		return error;
	}
	if (error != 0) {
		/* too many fragments, linearize */

		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			m_freem(m0);
			return ENOMEM;
		}

		M_DUP_PKTHDR(mnew, m0);
		if (m0->m_pkthdr.len > MHLEN) {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(m0);
				m_freem(mnew);
				return ENOMEM;
			}
		}

		m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mnew, caddr_t));
		m_freem(m0);
		mnew->m_len = mnew->m_pkthdr.len;
		m0 = mnew;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			return error;
		}
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);

			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);

			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		/* csum flags belong to the first fragment only */
		if (map->dm_nsegs > 1)
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		txctl |= NFE_RXTX_V3MAGIC;
		flags |= NFE_TX_LASTFRAG_V2;

		desc64->flags = htole16(flags);
		nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP) {
			txctl |= NFE_RXTX_V2MAGIC;
			flags |= NFE_TX_LASTFRAG_V2;
		} else
			flags |= NFE_TX_LASTFRAG_V1;

		desc32->flags = htole16(flags);
		nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
	}

	if (sc->sc_flags & NFE_HW_CSUM)
		txctl |= NFE_RXTX_RXCHECK;

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, txctl);

	return 0;
}

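/*
 * Start output: hand packets from the interface send queue to nfe_encap()
 * and stop (setting IFF_OACTIVE) as soon as a packet cannot be queued;
 * nfe_txeof() restarts output once descriptors are reclaimed.
 */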
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/* start watchdog timer */
		ifp->if_timer = 5;
	}
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t rxtxctl;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		rxtxctl |= NFE_RXTX_RXCHECK;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr);
	NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_TIMER_INT, 970);	/* XXX Magic */

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | rxtxctl);

	/* configure media */
	mii_mediachg(&sc->sc_mii);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

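/*
 * Allocate and map DMA-safe memory for the Rx descriptor ring and
 * pre-allocate an mbuf cluster for every ring slot.
 */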
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	void **desc;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->length = htole16(MCLBYTES);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->length = htole16(MCLBYTES);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(MCLBYTES);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(MCLBYTES);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

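/*
 * Program the hardware multicast filter.  The filter is a single
 * address/mask pair, so compute the bits common to all joined multicast
 * addresses; if a multicast range is joined, fall back to ALLMULTI.
 */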
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &=  enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

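/*
 * Read the station address from the MAC address registers.  Note that the
 * byte order used here differs from nfe_set_macaddr(); the address loaded
 * by the chip at power-up is apparently stored in reverse order.
 */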
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >>  8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}