/*	$OpenBSD: if_nfe.c,v 1.28 2006/02/11 11:51:30 damien Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for nvidia nForce Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#define NFE_DEBUG

#ifdef NFE_DEBUG
int nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;

	if (pci_mapreg_map(pa, NFE_PCI_BA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &memsize, 0) != 0) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities = IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}
#endif

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			ifp->if_flags &= ~IFF_RUNNING;
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy  |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

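/*
 * PHY registers are accessed through the MAC's MII management
 * interface: a request is written to NFE_PHY_CTL and the NFE_PHY_BUSY
 * bit is then polled until it clears (at most 1000 iterations of
 * 100us each).
 */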
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("timeout waiting for PHY\n"));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("could not read PHY\n"));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r == 0) {
		/* re-enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		return 0;
	}

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("link state changed\n"));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

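/*
 * Receive completion: walk the Rx ring, hand completed frames to the
 * network stack and re-arm each descriptor with a fresh cluster.
 * Processing stops at the first descriptor still marked NFE_RX_READY,
 * i.e. not yet filled in by the chip.
 */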
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m, *mnew;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		MCLGET(mnew, M_DONTWAIT);
		if (!(mnew->m_flags & M_EXT)) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(mnew, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail... */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->flags = htole16(NFE_RX_READY);
			desc64->length = htole16(MCLBYTES);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->flags = htole16(NFE_RX_READY);
			desc32->length = htole16(MCLBYTES);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

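/*
 * Transmit completion: reclaim descriptors that the chip has finished
 * with (NFE_TX_VALID cleared), count errors on the last fragment of
 * each frame and free the transmitted mbuf chains.
 */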
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		data = &sc->txq.data[sc->txq.next];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1))
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2))
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			DPRINTF(("last fragment bit w/o associated mbuf!\n"));
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	nfe_start(ifp);
}

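/*
 * Map an outgoing mbuf chain for DMA and fill one Tx descriptor per
 * segment.  If the chain has too many segments for the map (EFBIG),
 * it is first copied into a single mbuf (or cluster) and mapped again.
 */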
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	struct mbuf *mnew;
	bus_dmamap_t map;
	uint16_t flags = NFE_TX_VALID;
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}
	if (error != 0) {
		/* too many fragments, linearize */

		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL)
			return ENOBUFS;

		M_DUP_PKTHDR(mnew, m0);
		if (m0->m_pkthdr.len > MHLEN) {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				return ENOBUFS;
			}
		}

		m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mnew, caddr_t));
		m_freem(m0);
		mnew->m_len = mnew->m_pkthdr.len;
		m0 = mnew;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			return error;
		}
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);

			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);

			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		/* csum flags belong to the first fragment only */
		if (map->dm_nsegs > 1)
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		flags |= NFE_TX_LASTFRAG_V2;

		desc64->flags = htole16(flags);
		nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;

		desc32->flags = htole16(flags);
		nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	uint32_t txctl;
	int pkts = 0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		pkts++;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}
	if (pkts == 0)
		return;

	txctl = NFE_RXTX_KICKTX;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		txctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		txctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		txctl |= NFE_RXTX_RXCHECK;
#endif

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, txctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp, rxtxctl;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	nfe_ifmedia_upd(ifp);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		rxtxctl |= NFE_RXTX_RXCHECK;
#endif

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr);
	NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, MCLBYTES);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_TIMER_INT, 970);	/* XXX Magic */

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	void **desc;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->length = htole16(MCLBYTES);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->length = htole16(MCLBYTES);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(MCLBYTES);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(MCLBYTES);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

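/*
 * Program the multicast filter.  The addr/mask pair computed below
 * keeps, for every bit position, the value shared by the broadcast
 * address and all enabled multicast addresses (addr) and the positions
 * on which they all agree (mask); promiscuous and allmulti modes use
 * an all-zero pair instead.
 */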
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &=  enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >>  8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}