/*	$OpenBSD: if_nfe.c,v 1.88 2009/03/29 21:53:52 sthen Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

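	/*
	 * Record per-chipset capabilities: jumbo frame support, 40-bit DMA
	 * addressing, hardware checksum offload, hardware VLAN tagging,
	 * power management and whether the station address is stored in
	 * reversed byte order (see nfe_get_macaddr()).
	 */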
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

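	/*
	 * On chips with power management, reset the MAC and clear the
	 * wakeup bits so the chip is in a known state before configuration.
	 */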
	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

#ifdef notyet
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

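/*
 * Media status change callback: propagate the speed and duplex negotiated
 * by the PHY into the MAC's PHY interface, random seed, misc and link
 * speed registers.
 */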
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy  |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

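/*
 * Read a PHY register through the MAC's MII interface, polling the busy
 * bit until the transfer completes.
 */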
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		error = 0;
	}

	splx(s);
	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

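/*
 * Rx interrupt handling: walk the Rx ring, pass each completed frame to
 * the network stack and give the descriptor back to the chip with a
 * freshly allocated buffer.
 */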
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

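/*
 * Tx interrupt handling: reclaim descriptors for frames the chip has
 * finished transmitting, free the associated mbufs and restart the
 * transmit queue once slots become available again.
 */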
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get here */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}

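/*
 * Map an outgoing mbuf chain for DMA and fill one Tx descriptor per
 * segment.  The valid bit of the first descriptor is set last so the
 * chip never sees a partially built chain.
 */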
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first fragment
			 * only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

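/*
 * Drain the interface send queue into the Tx ring, kick the transmitter
 * and arm the watchdog timer.
 */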
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}

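/*
 * Bring the interface up: reset the chip, program the ring addresses,
 * station address, Rx filter and media, then enable Rx, Tx and
 * interrupts.
 */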
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupt coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates an NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

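/*
 * Allocate the Rx descriptor ring (32-bit or 64-bit descriptors depending
 * on the chip) and pre-load every slot with an mbuf cluster or jumbo
 * buffer.
 */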
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

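/*
 * Allocate one contiguous DMA region for jumbo Rx buffers and carve it
 * into NFE_JBYTES-sized chunks kept on a free list.
 */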
int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

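/*
 * Allocate the Tx descriptor ring and one DMA map per slot, each able to
 * hold a jumbo-length chain of up to NFE_MAX_SCATTER segments.
 */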
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

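/*
 * Program the Rx filter: build an address/mask pair covering all joined
 * multicast groups, falling back to all-multicast or promiscuous mode
 * when requested or when an address range is present.
 */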
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &=  enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

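/*
 * Read the station address from the chip.  Chips flagged with
 * NFE_CORRECT_MACADDR store it in the opposite byte order.
 */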
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >>  8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >>  8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}