/*	$OpenBSD: if_nfe.c,v 1.10 2006/02/04 09:46:48 damien Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for nvidia nForce Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_mediachange(struct ifnet *);
void	nfe_mediastatus(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#define NFE_DEBUG

#ifdef NFE_DEBUG
int nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;

	if (pci_mapreg_map(pa, NFE_PCI_BA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &memsize, 0) != 0) {
		printf(": can't map mem space\n");
		return;
	}

	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities = IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_mediachange,
	    nfe_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t reg;

	reg = NFE_READ(sc, NFE_PHY_INT);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		reg |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		reg |= NFE_PHY_100TX;
		break;
	}

	NFE_WRITE(sc, NFE_PHY_INT, reg);
}

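/*
 * Read a PHY register through the MII management interface: cancel any
 * pending operation, program NFE_PHY_CTL with the PHY and register
 * numbers, busy-wait for completion and return the value latched in
 * NFE_PHY_DATA (0 on timeout or error).
 */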
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("timeout waiting for PHY\n"));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("could not read PHY\n"));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	if (r == 0) {
		/* re-enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		return 0;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	DPRINTF(("nfe_intr: interrupt register %x\n", r));

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			nfe_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			nfe_init(ifp);
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

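/*
 * Receive interrupt handler: walk the Rx ring, hand completed frames to
 * the network stack and give each descriptor back to the chip with a
 * freshly allocated mbuf cluster (the old mbuf is recycled if a
 * replacement cannot be allocated or loaded).
 */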
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m, *mnew;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		MCLGET(mnew, M_DONTWAIT);
		if (!(mnew->m_flags & M_EXT)) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(mnew, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail... */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#else
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->flags = htole16(NFE_RX_READY);
			desc64->length = htole16(MCLBYTES);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->flags = htole16(NFE_RX_READY);
			desc32->length = htole16(MCLBYTES);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

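/*
 * Transmit completion handler: reclaim descriptors the chip is done with,
 * unload and free the associated mbuf chains, clear IFF_OACTIVE and try
 * to queue more packets with nfe_start().
 */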
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	uint16_t flags;

	/* XXX: should limit # iterations to NFE_TX_RING_COUNT */
	for (;;) {
		data = &sc->txq.data[sc->txq.next];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (!(flags & NFE_TX_VALID))
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1))
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2))
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			DPRINTF(("last fragment bit w/o associated mbuf!\n"));
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	nfe_start(ifp);
}

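/*
 * DMA map an outgoing mbuf chain and fill one Tx descriptor per segment.
 * Chains with too many fragments are first copied into a single mbuf
 * (cluster) so they fit in the map, then the chip is kicked to start
 * transmitting.
 */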
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	struct mbuf *mnew;
	bus_dmamap_t map;
	uint32_t txctl = NFE_RXTX_KICKTX;
	uint16_t flags = NFE_TX_VALID;
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(m0);
		return error;
	}
	if (error != 0) {
		/* too many fragments, linearize */

		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			m_freem(m0);
			return ENOMEM;
		}

		M_DUP_PKTHDR(mnew, m0);
		if (m0->m_pkthdr.len > MHLEN) {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(m0);
				m_freem(mnew);
				return ENOMEM;
			}
		}

		m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mnew, caddr_t));
		m_freem(m0);
		mnew->m_len = mnew->m_pkthdr.len;
		m0 = mnew;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			return error;
		}
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	/* h/w checksum (XXX only if HW_CSUM?) */
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);

			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);

			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		/* csum flags belong to the first fragment only */
		if (map->dm_nsegs > 1)
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		txctl |= NFE_RXTX_V3MAGIC;
		flags |= NFE_TX_LASTFRAG_V2;

		desc64->flags = htole16(flags);
		nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP) {
			txctl |= NFE_RXTX_V2MAGIC;
			flags |= NFE_TX_LASTFRAG_V2;
		} else
			flags |= NFE_TX_LASTFRAG_V1;

		desc32->flags = htole16(flags);
		nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
	}

	if (sc->sc_flags & NFE_HW_CSUM)
		txctl |= NFE_RXTX_RXCHECK;

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, txctl);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/* start watchdog timer */
		ifp->if_timer = 5;
	}
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

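/*
 * (Re)initialize the hardware: reset the Rx/Tx engine, program the MAC
 * address, ring addresses and ring sizes, start the receiver and
 * transmitter, set up the Rx filter and finally enable interrupts.
 */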
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t rxtxctl;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		rxtxctl |= NFE_RXTX_RXCHECK;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr);
	NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_TIMER_INT, 970);	/* XXX Magic */

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | rxtxctl);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	nfe_setmulti(sc);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	mii_mediachg(&sc->sc_mii);

	timeout_set(&sc->sc_timeout, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

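/*
 * Allocate the Rx descriptor ring (32- or 64-bit descriptors depending on
 * NFE_40BIT_ADDR) in DMA-able memory and pre-load one mbuf cluster per
 * ring slot.
 */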
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	void **desc;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->length = htole16(MCLBYTES);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->length = htole16(MCLBYTES);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(MCLBYTES);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(MCLBYTES);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

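/*
 * Allocate the Tx descriptor ring in DMA-able memory and create one DMA
 * map per ring slot for mapping outgoing mbuf chains.
 */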
1190nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1191{
1192	int i, nsegs, error;
1193	void **desc;
1194	int descsize;
1195
1196	if (sc->sc_flags & NFE_40BIT_ADDR) {
1197		desc = (void **)&ring->desc64;
1198		descsize = sizeof (struct nfe_desc64);
1199	} else {
1200		desc = (void **)&ring->desc32;
1201		descsize = sizeof (struct nfe_desc32);
1202	}
1203
1204	ring->queued = 0;
1205	ring->cur = ring->next = 0;
1206
1207	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
1208	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
1209
1210	if (error != 0) {
1211		printf("%s: could not create desc DMA map\n",
1212		    sc->sc_dev.dv_xname);
1213		goto fail;
1214	}
1215
1216	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
1217	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
1218	if (error != 0) {
1219		printf("%s: could not allocate DMA memory\n",
1220		    sc->sc_dev.dv_xname);
1221		goto fail;
1222	}
1223
1224	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
1225	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
1226	if (error != 0) {
1227		printf("%s: could not map desc DMA memory\n",
1228		    sc->sc_dev.dv_xname);
1229		goto fail;
1230	}
1231
1232	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
1233	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
1234	if (error != 0) {
1235		printf("%s: could not load desc DMA map\n",
1236		    sc->sc_dev.dv_xname);
1237		goto fail;
1238	}
1239
1240	bzero(*desc, NFE_TX_RING_COUNT * descsize);
1241	ring->physaddr = ring->map->dm_segs->ds_addr;
1242
1243	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1244		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1245		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
1246		    &ring->data[i].map);
1247		if (error != 0) {
1248			printf("%s: could not create DMA map\n",
1249			    sc->sc_dev.dv_xname);
1250			goto fail;
1251		}
1252	}
1253
1254	return 0;
1255
1256fail:	nfe_free_tx_ring(sc, ring);
1257	return error;
1258}
1259
1260void
1261nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1262{
1263	struct nfe_tx_data *data;
1264	int i;
1265
1266	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1267		if (sc->sc_flags & NFE_40BIT_ADDR)
1268			ring->desc64[i].flags = 0;
1269		else
1270			ring->desc32[i].flags = 0;
1271
1272		data = &ring->data[i];
1273
1274		if (data->m != NULL) {
1275			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1276			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1277			bus_dmamap_unload(sc->sc_dmat, data->map);
1278			m_freem(data->m);
1279			data->m = NULL;
1280		}
1281	}
1282
1283	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1284	    BUS_DMASYNC_PREWRITE);
1285
1286	ring->queued = 0;
1287	ring->cur = ring->next = 0;
1288}
1289
1290void
1291nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1292{
1293	struct nfe_tx_data *data;
1294	void *desc;
1295	int i, descsize;
1296
1297	if (sc->sc_flags & NFE_40BIT_ADDR) {
1298		desc = ring->desc64;
1299		descsize = sizeof (struct nfe_desc64);
1300	} else {
1301		desc = ring->desc32;
1302		descsize = sizeof (struct nfe_desc32);
1303	}
1304
1305	if (desc != NULL) {
1306		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
1307		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1308		bus_dmamap_unload(sc->sc_dmat, ring->map);
1309		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
1310		    NFE_TX_RING_COUNT * descsize);
1311		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
1312	}
1313
1314	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1315		data = &ring->data[i];
1316
1317		if (data->m != NULL) {
1318			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1319			    data->map->dm_mapsize,
1320			    BUS_DMASYNC_POSTWRITE);
1321			bus_dmamap_unload(sc->sc_dmat, data->map);
1322			m_freem(data->m);
1323		}
1324
1325		if (data->map != NULL)
1326			bus_dmamap_destroy(sc->sc_dmat, data->map);
1327	}
1328}
1329
1330int
1331nfe_mediachange(struct ifnet *ifp)
1332{
1333	struct nfe_softc *sc = ifp->if_softc;
1334	struct mii_data	*mii = &sc->sc_mii;
1335	uint32_t val;
1336
1337	DPRINTF(("nfe_mediachange\n"));
1338#if 0
1339	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
1340		/* XXX? */
1341	else
1342#endif
1343		val = 0;
1344
1345	val |= NFE_MEDIA_SET;
1346
1347	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1348	case IFM_1000_T:
1349		val |= NFE_MEDIA_1000T;
1350		break;
1351	case IFM_100_TX:
1352		val |= NFE_MEDIA_100TX;
1353		break;
1354	case IFM_10_T:
1355		val |= NFE_MEDIA_10T;
1356		break;
1357	}
1358
	DPRINTF(("nfe_mediachange: val=0x%x\n", val));
	NFE_WRITE(sc, NFE_LINKSPEED, val);

	return 0;
}

void
nfe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

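/*
 * Program the hardware Rx filter: compute an address/mask pair covering
 * all joined multicast groups (falling back to ALLMULTI when an address
 * range is present) and enable promiscuous or normal unicast reception
 * as requested.
 */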
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &=  enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >>  8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_timeout, hz);
}
