/*	$OpenBSD: if_nfe.c,v 1.24 2006/02/11 09:15:57 damien Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#define NFE_DEBUG

#ifdef NFE_DEBUG
int nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;

	if (pci_mapreg_map(pa, NFE_PCI_BA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &memsize, 0) != 0) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

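	/*
	 * Per-chip capabilities: NFE_40BIT_ADDR selects the 64-bit
	 * descriptor format, NFE_JUMBO_SUP marks chips that use the
	 * "v2" descriptor flags and NFE_HW_CSUM enables the hardware
	 * checksum offload paths below.
	 */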
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities = IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}
#endif

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			ifp->if_flags &= ~IFF_RUNNING;
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

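	/* propagate the negotiated speed and duplex settings to the MAC */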
	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy  |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

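	/* wait up to 100ms (1000 * 100us) for the PHY access to complete */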
	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("timeout waiting for PHY\n"));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("could not read PHY\n"));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r == 0) {
		/* re-enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		return 0;
	}

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("link state changed\n"));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}

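/*
 * Sync a single Tx or Rx descriptor; the offset within the ring's DMA
 * map is computed from the descriptor's address relative to the ring
 * base.
 */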
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m, *mnew;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

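		/*
		 * NFE_RX_READY is still set: the chip has not filled this
		 * descriptor yet.
		 */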
		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		MCLGET(mnew, M_DONTWAIT);
		if (!(mnew->m_flags & M_EXT)) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(mnew, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail... */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->flags = htole16(NFE_RX_READY);
			desc64->length = htole16(MCLBYTES);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->flags = htole16(NFE_RX_READY);
			desc32->length = htole16(MCLBYTES);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		data = &sc->txq.data[sc->txq.next];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

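		/*
		 * NFE_TX_VALID is still set: the chip has not transmitted
		 * this descriptor yet.
		 */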
		if (flags & NFE_TX_VALID)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1))
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2))
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get here */
			DPRINTF(("last fragment bit w/o associated mbuf!\n"));
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	nfe_start(ifp);
}

int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	struct mbuf *mnew;
	bus_dmamap_t map;
	uint16_t flags = NFE_TX_VALID;
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(m0);
		return error;
	}
	if (error != 0) {
		/* too many fragments, linearize */

		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			m_freem(m0);
			return ENOMEM;
		}

		M_DUP_PKTHDR(mnew, m0);
		if (m0->m_pkthdr.len > MHLEN) {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(m0);
				m_freem(mnew);
				return ENOMEM;
			}
		}

		m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mnew, caddr_t));
		m_freem(m0);
		mnew->m_len = mnew->m_pkthdr.len;
		m0 = mnew;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			return error;
		}
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

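	/*
	 * Map each DMA segment to one Tx descriptor; the length field
	 * is stored as the segment length minus one.
	 */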
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);

			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);

			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		/* csum flags belong to the first fragment only */
		if (map->dm_nsegs > 1)
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		flags |= NFE_TX_LASTFRAG_V2;

		desc64->flags = htole16(flags);
		nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;

		desc32->flags = htole16(flags);
		nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	uint32_t txctl = NFE_RXTX_KICKTX;
	int pkts = 0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		pkts++;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}
	if (pkts == 0)
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		txctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		txctl |= NFE_RXTX_V2MAGIC;

#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		txctl |= NFE_RXTX_RXCHECK;
#endif

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, txctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp, rxtxctl;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		rxtxctl |= NFE_RXTX_RXCHECK;
#endif

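	/* soft-reset the Rx/Tx engine before reprogramming it */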
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr);
	NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_TIMER_INT, 970);	/* XXX Magic */

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | rxtxctl);

	/* configure media */
	mii_mediachg(&sc->sc_mii);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

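/*
 * Allocate the Rx descriptor ring in one DMA-safe chunk (create the
 * map, allocate and map the memory, then load the map) and
 * pre-allocate one mbuf cluster per descriptor.
 */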
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	void **desc;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->length = htole16(MCLBYTES);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->length = htole16(MCLBYTES);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(MCLBYTES);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(MCLBYTES);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

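	/*
	 * Fold all multicast addresses into a single address/mask pair:
	 * addr keeps the bit values common to every address and mask
	 * marks the bit positions on which they all agree.
	 */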
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &=  enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

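/*
 * Read the factory-programmed MAC address.  Note that the chip stores
 * it in reverse byte order compared with the layout written back by
 * nfe_set_macaddr() below.
 */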
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >>  8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}