/*	$OpenBSD: if_nfe.c,v 1.23 2006/02/10 03:54:54 brad Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#define NFE_DEBUG

#ifdef NFE_DEBUG
int nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;

	if (pci_mapreg_map(pa, NFE_PCI_BA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &memsize, 0) != 0) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities = IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}
#endif

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

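/*
 * Power management hook: on resume, reinitialize the interface if it
 * was up and restart any pending transmission.
 */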
void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			ifp->if_flags &= ~IFF_RUNNING;
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

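/*
 * MII status change callback: program the MAC's PHY interface, random
 * seed and link speed registers to match the negotiated media.
 */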
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy  |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

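/*
 * Read a PHY register through the MAC's MII management interface,
 * polling until the controller clears its busy bit.
 */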
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("timeout waiting for PHY\n"));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("could not read PHY\n"));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val));

	return val;
}

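/*
 * Write a PHY register through the MII management interface and wait
 * for the controller to complete the cycle.
 */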
void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

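/*
 * Interrupt handler: acknowledge pending events and service the Rx and
 * Tx rings. Interrupts are masked for the duration of the handler.
 */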
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r == 0) {
		/* re-enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		return 0;
	}

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("link state changed\n"));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	return 1;
}

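/*
 * Handle interface ioctls: address assignment, MTU, flag changes,
 * multicast membership and media selection.
 */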
int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}

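/*
 * Helpers to bus_dmamap_sync() a single Tx or Rx descriptor; the offset
 * within the ring map is computed from the descriptor's address.
 */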
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

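/*
 * Rx completion: walk the Rx ring, hand completed frames to the network
 * stack and recycle each descriptor with a freshly loaded mbuf cluster.
 */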
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m, *mnew;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		MCLGET(mnew, M_DONTWAIT);
		if (!(mnew->m_flags & M_EXT)) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(mnew, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail... */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->flags = htole16(NFE_RX_READY);
			desc64->length = htole16(MCLBYTES);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->flags = htole16(NFE_RX_READY);
			desc32->length = htole16(MCLBYTES);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

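/*
 * Tx completion: reclaim descriptors the hardware has finished with,
 * free the associated mbufs and restart the output queue.
 */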
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		data = &sc->txq.data[sc->txq.next];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1))
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2))
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			DPRINTF(("last fragment bit w/o associated mbuf!\n"));
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	nfe_start(ifp);
}

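/*
 * Map an outgoing mbuf chain for DMA and fill one Tx descriptor per
 * segment; chains with too many fragments are copied into a single
 * cluster first.
 */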
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	struct mbuf *mnew;
	bus_dmamap_t map;
	uint16_t flags = NFE_TX_VALID;
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(m0);
		return error;
	}
	if (error != 0) {
		/* too many fragments, linearize */

		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			m_freem(m0);
			return ENOMEM;
		}

		M_DUP_PKTHDR(mnew, m0);
		if (m0->m_pkthdr.len > MHLEN) {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(m0);
				m_freem(mnew);
				return ENOMEM;
			}
		}

		m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mnew, caddr_t));
		m_freem(m0);
		mnew->m_len = mnew->m_pkthdr.len;
		m0 = mnew;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			return error;
		}
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);

			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);

			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		/* csum flags belong to the first fragment only */
		if (map->dm_nsegs > 1)
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		flags |= NFE_TX_LASTFRAG_V2;

		desc64->flags = htole16(flags);
		nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;

		desc32->flags = htole16(flags);
		nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

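/*
 * Start output: dequeue packets from the interface send queue, encap
 * them into the Tx ring and kick the transmitter once at the end.
 */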
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	uint32_t txctl = NFE_RXTX_KICKTX;
	int pkts = 0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		pkts++;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}
	if (pkts == 0)
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		txctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		txctl |= NFE_RXTX_V2MAGIC;

#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		txctl |= NFE_RXTX_RXCHECK;
#endif

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, txctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

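/*
 * Initialize the hardware: reset the Rx/Tx engine, program the MAC
 * address, ring addresses and sizes, select the media, set the Rx
 * filter and enable Rx, Tx and interrupts.
 */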
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t rxtxctl;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		rxtxctl |= NFE_RXTX_RXCHECK;
#endif

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr);
	NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_TIMER_INT, 970);	/* XXX Magic */

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | rxtxctl);

	/* configure media */
	mii_mediachg(&sc->sc_mii);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

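/*
 * Stop the interface: cancel the tick timeout, take the MII down, halt
 * the Rx/Tx engines, mask interrupts and reset both rings.
 */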
void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

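/*
 * Allocate DMA-safe memory for the Rx descriptor ring and pre-allocate
 * a loaded mbuf cluster for every ring entry.
 */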
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	void **desc;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->length = htole16(MCLBYTES);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->length = htole16(MCLBYTES);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(MCLBYTES);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(MCLBYTES);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

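/*
 * Allocate DMA-safe memory for the Tx descriptor ring and create one
 * DMA map per ring entry for the outgoing mbuf chains.
 */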
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

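/*
 * Program the Rx filter: compute the address/mask pair covering all
 * joined multicast groups (falling back to all-multi or promiscuous
 * mode when requested) and write it to the multicast registers.
 */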
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &=  enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

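/*
 * Read the station address out of, and program it into, the
 * NFE_MACADDR_HI/LO register pair; note that the byte order used when
 * reading differs from the one used when writing.
 */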
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >>  8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

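/*
 * One-second timer: poll the MII for link state changes and reschedule.
 */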
void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}