/*	$OpenBSD: if_nfe.c,v 1.5 2006/01/14 04:33:35 jsg Exp $	*/
/*
 * Copyright (c) 2005 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);

int	nfe_intr(void *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_rxintr(struct nfe_softc *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_txintr(struct nfe_softc *);

int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_start(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_reset(struct nfe_softc *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_update_promisc(struct nfe_softc *);
void	nfe_tick(void *);

int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_mediachange(struct ifnet *);
void	nfe_mediastatus(struct ifnet *, struct ifmediareq *);

struct cfattach nfe_ca = {
	sizeof(struct nfe_softc),
	nfe_match,
	nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int	nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof(nfe_devices) / sizeof(nfe_devices[0])));
}

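/*
 * Attach: enable bus mastering, map the control/status registers and
 * the interrupt, read the station address, pick the descriptor format
 * for this chip generation, then set up the rings, ifnet and PHY.
 */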
void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc	*sc = (struct nfe_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct ifnet		*ifp;
	bus_size_t		iosize;
	pcireg_t		command;
	int			i;

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;

	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if ((command & PCI_COMMAND_MEM_ENABLE) == 0) {
		printf(": mem space not enabled\n");
		return;
	}

	if (pci_mapreg_map(pa, NFE_PCI_BA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &iosize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	/*
	 * Read the station address out of the two MACADDR registers.
	 * XXX the byte swapping here assumes a little-endian host.
	 */
	i = betoh16(NFE_READ(sc, NFE_MACADDR_LO) & 0xffff);
	memcpy((char *)sc->sc_arpcom.ac_enaddr, &i, 2);
	i = betoh32(NFE_READ(sc, NFE_MACADDR_HI));
	memcpy(&sc->sc_arpcom.ac_enaddr[2], &i, 4);

	printf(", address %s\n",
	    ether_sprintf(sc->sc_arpcom.ac_enaddr));

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	default:
		sc->sc_flags = 0;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		goto fail1;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		goto fail2;
	}

	NFE_WRITE(sc, NFE_RING_SIZE, NFE_RX_RING_COUNT << 16 |
	    NFE_TX_RING_COUNT);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = 1000000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/* Set interface name */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	/* XXX always seem to get a ghost ukphy along with eephy on nf4u */
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    nfe_mediachange, nfe_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* XXX powerhook */
	return;

fail2:
	nfe_free_tx_ring(sc, &sc->txq);
fail1:
	/*
	 * The ring alloc routines free their own partial allocations on
	 * failure, so only the Tx ring (which succeeded) needs freeing
	 * here; freeing the failed Rx ring again would release its DMA
	 * resources twice.
	 */
	return;
}

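/*
 * MII status-change callback: push the PHY's negotiated speed out to
 * the MAC.  XXX the register usage here is inferred from the NFE_PHY_*
 * flag names.
 */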
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data	*mii = &sc->sc_mii;
	uint32_t reg;

	reg = NFE_READ(sc, NFE_PHY_INT);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		reg |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		reg |= NFE_PHY_100TX;
		break;
	}
	NFE_WRITE(sc, NFE_PHY_INT, reg);
}

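/*
 * Read a PHY register over MDIO.  A still-busy previous cycle is
 * cancelled first; completion is approximated with a fixed delay
 * rather than by polling the busy bit.
 */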
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t r;

	r = NFE_READ(sc, NFE_PHY_CTL);
	if (r & NFE_PHY_BUSY) {
		/* cancel any MDIO cycle still in flight */
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		delay(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, reg | (phy << NFE_PHYADD_SHIFT));
	delay(1000);
	r = NFE_READ(sc, NFE_PHY_DATA);
	if (r != 0xffffffff && r != 0)
		sc->phyaddr = phy;	/* remember the last PHY that answered */

	DPRINTFN(2, ("nfe mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, r));

	return (r);
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t r;

	r = NFE_READ(sc, NFE_PHY_CTL);
	if (r & NFE_PHY_BUSY) {
		/* cancel any MDIO cycle still in flight */
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		delay(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, data);
	r = reg | (phy << NFE_PHYADD_SHIFT) | NFE_PHY_WRITE;
	NFE_WRITE(sc, NFE_PHY_CTL, r);
}

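/*
 * Interrupt handler: mask interrupts while working, read and
 * acknowledge the status register, then dispatch Rx and Tx completions
 * before unmasking again.
 */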
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);	/* ack */

	if (r == 0) {
		/* not our interrupt */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED | NFE_IRQ_TIMER);
		return (0);
	}

	if (r & NFE_IRQ_RX)
		nfe_rxintr(sc);

	if (r & NFE_IRQ_TX_DONE)
		nfe_txintr(sc);

	DPRINTF(("nfe_intr: interrupt register %x\n", r));

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED | NFE_IRQ_TIMER);

	return (1);
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			nfe_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			nfe_init(ifp);
			break;
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (cmd == SIOCADDMULTI)
			error = ether_addmulti(ifr, &sc->sc_arpcom);
		else
			error = ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_update_promisc(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;
	default:
		error = EINVAL;
	}
	splx(s);

	return (error);
}

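/*
 * XXX transmit is not implemented yet: packets queued on if_snd are
 * never handed to the hardware.
 */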
void
nfe_start(struct ifnet *ifp)
{
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

void
nfe_watchdog(struct ifnet *ifp)
{
	/* XXX not implemented */
}

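/*
 * Bring the interface up: reset the chip, select the descriptor format
 * magic matching this chip generation, point the MAC at the rings,
 * program the filter and unmask interrupts.
 */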
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc	*sc = ifp->if_softc;
	int r;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
	delay(10);

	r = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		r |= NFE_RXTX_V3MAGIC | NFE_RXTX_RXCHECK;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		r |= NFE_RXTX_V2MAGIC | NFE_RXTX_RXCHECK;

	NFE_WRITE(sc, NFE_RXTX_CTL, r);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* XXX set MAC address */

	/* Tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr);
	NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_TIMER_INT, 970);		/* XXX Magic */

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	nfe_setmulti(sc);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_RXERR | NFE_IRQ_RX |
	    NFE_IRQ_RX_NOBUF | NFE_IRQ_TXERR | NFE_IRQ_TX_DONE | NFE_IRQ_LINK |
	    NFE_IRQ_TXERR2);

	mii_mediachg(&sc->sc_mii);

	/* arm the periodic MII tick; nfe_tick reschedules itself */
	timeout_set(&sc->sc_timeout, nfe_tick, sc);
	timeout_add(&sc->sc_timeout, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
nfe_reset(struct nfe_softc *sc)
{
	printf("nfe_reset!\n");
}

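/*
 * Allocate the Rx descriptor ring (v1 or v3 layout depending on
 * whether the chip uses 40-bit DMA addressing) as one DMA-loaded
 * segment and fill every slot with an mbuf cluster ready to receive.
 */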
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	struct nfe_desc *desc_v1;
	struct nfe_desc_v3 *desc_v3;
	void **desc;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = (void **)&ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	memset(*desc, 0, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc_v3 = &sc->rxq.desc_v3[i];
			/* XXX upper 32 bits of the 40-bit DMA address */
			desc_v3->physaddr[0] = 0;
#if 0
			desc_v3->physaddr[0] =
			    (htole64(data->map->dm_segs->ds_addr) >> 32) &
			    0xffffffff;
#endif
			desc_v3->physaddr[1] =
			    htole64(data->map->dm_segs->ds_addr) & 0xffffffff;

			desc_v3->length = htole16(MCLBYTES);
			desc_v3->flags = htole16(NFE_RX_READY);
		} else {
			desc_v1 = &sc->rxq.desc_v1[i];
			desc_v1->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc_v1->length = htole16(MCLBYTES);
			desc_v1->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc_v3[i].length = htole16(MCLBYTES);
			ring->desc_v3[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc_v1[i].length = htole16(MCLBYTES);
			ring->desc_v1[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_rxintr(struct nfe_softc *sc)
{
	printf("nfe_rxintr!\n");
	return (0);
}

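/*
 * Allocate the Tx descriptor ring the same way as the Rx ring, plus a
 * DMA map per slot for outgoing mbuf chains; no buffers are allocated
 * up front on the Tx side.
 */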
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = (void **)&ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	memset(*desc, 0, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	void *desc;
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = &ring->desc_v3[i];
		else
			desc = &ring->desc_v1[i];

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR)
			((struct nfe_desc_v3 *)desc)->flags = 0;
		else
			((struct nfe_desc *)desc)->flags = 0;
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_txintr(struct nfe_softc *sc)
{
	printf("nfe_txintr!\n");
	return (0);
}

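/*
 * ifmedia change callback: translate the PHY's active media into the
 * MAC's link speed register.  XXX duplex handling is unresolved.
 */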
int
nfe_mediachange(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data	*mii = &sc->sc_mii;
	int val;

	DPRINTF(("nfe_mediachange\n"));

	val = 0;
#if 0
	/* XXX what to set for full-duplex? */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		;
#endif
	val |= NFE_MEDIA_SET;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		val |= NFE_MEDIA_1000T;
		break;
	case IFM_100_TX:
		val |= NFE_MEDIA_100TX;
		break;
	case IFM_10_T:
		val |= NFE_MEDIA_10T;
		break;
	}

	DPRINTF(("nfe_mediachange: val=0x%x\n", val));
	NFE_WRITE(sc, NFE_LINKSPEED, val);
	return (0);
}

void
nfe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

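/*
 * XXX real multicast filtering is not implemented yet; the filter is
 * programmed to accept everything, which is effectively promiscuous
 * mode.
 */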
void
nfe_setmulti(struct nfe_softc *sc)
{
	NFE_WRITE(sc, NFE_MULT_ADDR1, 0x01);
	NFE_WRITE(sc, NFE_MULT_ADDR2, 0);
	NFE_WRITE(sc, NFE_MULT_MASK1, 0);
	NFE_WRITE(sc, NFE_MULT_MASK2, 0);
#ifdef notyet
	NFE_WRITE(sc, NFE_MULTI_FLAGS, NFE_MC_ALWAYS | NFE_MC_MYADDR);
#else
	NFE_WRITE(sc, NFE_MULTI_FLAGS, NFE_MC_ALWAYS | NFE_MC_PROMISC);
#endif
}

void
nfe_update_promisc(struct nfe_softc *sc)
{
	/* XXX not implemented */
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_timeout, hz);
}