/*	$OpenBSD: if_nfe.c,v 1.40 2006/02/15 20:21:27 brad Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#define NFE_DEBUG
/*#define NFE_NO_JUMBO*/

#ifdef NFE_DEBUG
int nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize, 0) == 0)
			break;
		/* FALLTHROUGH */
	default:
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

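	/*
	 * Per-chip features: NFE_JUMBO_SUP marks chips that can use jumbo
	 * frames (and the v2 descriptor flags), NFE_40BIT_ADDR marks chips
	 * that use the 64-bit descriptor format with 40-bit DMA addressing,
	 * and NFE_HW_CSUM marks chips with Rx/Tx checksum offload.
	 * Hardware VLAN tagging on MCP55 is deliberately left disabled.
	 */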
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    0/*NFE_HW_VLAN*/;
		break;
	}

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}
#endif

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			ifp->if_flags &= ~IFF_RUNNING;
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

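/*
 * MII status change callback: propagate the speed and duplex negotiated
 * by the PHY into the MAC's PHY interface, random seed, misc and link
 * speed registers so that MAC and PHY agree.
 */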
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy  |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

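/*
 * Read a PHY register through the MII management interface: clear any
 * pending access, post the PHY address and register number to
 * NFE_PHY_CTL, then poll until the busy bit clears and fetch the result
 * from NFE_PHY_DATA.
 */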
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("timeout waiting for PHY\n"));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("could not read PHY\n"));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

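/*
 * Interrupt handler.  Interrupts are masked for the duration of the
 * handler; the pending causes are read and acknowledged, the Rx and Tx
 * rings are serviced if the interface is running, and interrupts are
 * then re-enabled.
 */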
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	if (r == 0) {
		/* re-enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		return 0;
	}

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("link state changed\n"));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
		    (!(sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

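/*
 * Sync a range of Tx descriptors.  When the range wraps around the end
 * of the ring (end < start), the operation is split in two: from 'start'
 * to the last descriptor, then from the first descriptor to 'end'.
 */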
void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end >= start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end >= start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

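/*
 * Process received frames.  For each descriptor the chip has handed
 * back, a replacement buffer (mbuf cluster or jumbo buffer) is allocated
 * and loaded first; only then is the old mbuf passed up the stack.  If
 * the replacement cannot be obtained, the packet is dropped and the old
 * buffer is recycled, so the ring never loses a buffer.
 */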
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

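/*
 * Process completed transmissions.  Walk the Tx ring from 'next' to
 * 'cur'; on the last fragment of each chain, account for errors, sync
 * and unload the DMA map and free the mbuf, then clear IFF_OACTIVE and
 * try to send more packets.
 */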
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		data = &sc->txq.data[sc->txq.next];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1))
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2))
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			DPRINTF(("last fragment bit w/o associated mbuf!\n"));
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	nfe_start(ifp);
}

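/*
 * Map an outgoing mbuf chain for DMA and fill one Tx descriptor per
 * segment.  A chain with too many fragments (EFBIG) is first copied
 * into a single mbuf (cluster) and mapped again.  Checksum offload
 * flags are set on the first fragment only, and the last-fragment flag
 * is added to the final descriptor once all segments are in place.
 */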
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	struct mbuf *mnew;
	bus_dmamap_t map;
	uint16_t flags = NFE_TX_VALID;
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}
	if (error != 0) {
		/* too many fragments, linearize */

		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL)
			return ENOBUFS;

		M_DUP_PKTHDR(mnew, m0);
		if (m0->m_pkthdr.len > MHLEN) {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				return ENOBUFS;
			}
		}

		m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mnew, caddr_t));
		m_freem(m0);
		mnew->m_len = mnew->m_pkthdr.len;
		m0 = mnew;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			return error;
		}
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags belong to the first fragment only */
		if (map->dm_nsegs > 1)
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);
	}

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN) {
		/* setup h/w VLAN tagging */
		if ((m0->m_flags & M_PROTO1) && m0->m_pkthdr.rcvif != NULL) {
			struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
			desc64->vtag = htole32(NFE_TX_VTAG |
			    htons(ifv->ifv_tag));
		} else
			desc64->vtag = 0;
	}
#endif

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;
	uint32_t txctl;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	txctl = NFE_RXTX_KICKTX;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		txctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		txctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		txctl |= NFE_RXTX_RXCHECK;
#endif

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, txctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

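/*
 * (Re)initialize the hardware: reset the Tx/Rx engine, program the MAC
 * address, the ring addresses and sizes and the Rx buffer size, wake up
 * the MAC, set the interrupt timer and the Rx filter, then start Rx and
 * Tx and re-enable interrupts.
 */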
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp, rxtxctl;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	nfe_ifmedia_upd(ifp);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		rxtxctl |= NFE_RXTX_RXCHECK;
#endif

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#ifdef notyet
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

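/*
 * Allocate the jumbo buffer pool: one NFE_JPOOL_SIZE DMA area is carved
 * into NFE_JPOOL_COUNT buffers of NFE_JBYTES each.  Buffers are handed
 * out from a free list by nfe_jalloc() and returned by nfe_jfree() when
 * the external mbuf storage is freed.
 */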
int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

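/*
 * Program the Rx filter.  The hardware matches multicast traffic against
 * a single address/mask pair, so compute the bits common to all enabled
 * multicast addresses; address ranges force IFF_ALLMULTI, and promiscuous
 * or all-multicast mode uses an all-zero address and mask.
 */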
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &=  enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

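/*
 * Note that the byte order used here is the reverse of the one used by
 * nfe_set_macaddr() below; the chip apparently stores the factory MAC
 * address in reversed order and the driver rewrites it in the order the
 * receive filter expects.
 */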
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >>  8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}