/*	$OpenBSD: if_nfe.c,v 1.2 2005/12/14 22:08:20 jsg Exp $	*/
/*
 * Copyright (c) 2005 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);

int	nfe_intr(void *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *, int);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_rxintr(struct nfe_softc *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *, int);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_txintr(struct nfe_softc *);

int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_start(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_reset(struct nfe_softc *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_update_promisc(struct nfe_softc *);
void	nfe_tick(void *);

int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_mediachange(struct ifnet *);
void	nfe_mediastatus(struct ifnet *, struct ifmediareq *);

struct cfattach nfe_ca = {
	sizeof(struct nfe_softc),
	nfe_match,
	nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int	nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof(nfe_devices)/sizeof(nfe_devices[0])));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc	*sc = (struct nfe_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct ifnet		*ifp;
	bus_size_t		iosize;
	pcireg_t		command;
	int			i;

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;

	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if ((command & PCI_COMMAND_MEM_ENABLE) == 0) {
		printf(": mem space not enabled\n");
		return;
	}

	if (pci_mapreg_map(pa, NFE_PCI_BA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &iosize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

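	/*
	 * Read the station address: NFE_MACADDR_LO supplies the first
	 * two bytes, NFE_MACADDR_HI the remaining four.
	 */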
	i = betoh16(NFE_READ(sc, NFE_MACADDR_LO) & 0xffff);
	memcpy((char *)sc->sc_arpcom.ac_enaddr, &i, 2);
	i = betoh32(NFE_READ(sc, NFE_MACADDR_HI));
	memcpy(&(sc->sc_arpcom.ac_enaddr[2]), &i, 4);

	printf(", address %s\n",
	    ether_sprintf(sc->sc_arpcom.ac_enaddr));

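	/*
	 * Per-chip features: nForce3 and newer support jumbo frames;
	 * CK804 and later additionally use 40-bit DMA addressing and
	 * the larger v3 descriptor format.
	 */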
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	default:
		sc->sc_flags = 0;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq, NFE_TX_RING_COUNT) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		goto fail1;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq, NFE_RX_RING_COUNT) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		goto fail2;
	}

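	/* ring sizes: Rx ring count in the upper 16 bits, Tx in the lower */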
	NFE_WRITE(sc, NFE_RING_SIZE, sc->rxq.count << 16 | sc->txq.count);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = 1000000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/* Set interface name */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	/* XXX always seem to get a ghost ukphy along with eephy on nf4u */
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    nfe_mediachange, nfe_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* XXX powerhook */
	return;

	/*
	 * The ring allocators free their own partial allocations on
	 * failure, so only the Tx ring (allocated first) is torn down
	 * here; freeing the Rx ring again would double-free it.
	 */
fail2:
	nfe_free_tx_ring(sc, &sc->txq);
fail1:
	return;
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data	*mii = &sc->sc_mii;
	uint32_t reg;

	reg = NFE_READ(sc, NFE_PHY_INT);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		reg |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		reg |= NFE_PHY_100TX;
		break;
	}
	NFE_WRITE(sc, NFE_PHY_INT, reg);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t r;

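	/* cancel any MII transaction still in progress */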
	r = NFE_READ(sc, NFE_PHY_CTL);
	if (r & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		delay(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, reg | (phy << NFE_PHYADD_SHIFT));
	delay(1000);
	r = NFE_READ(sc, NFE_PHY_DATA);
	if (r != 0xffffffff && r != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("nfe mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, r));

	return (r);
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t r;

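	/* as in nfe_miibus_readreg(), clear a pending transaction first */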
	r = NFE_READ(sc, NFE_PHY_CTL);
	if (r & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		delay(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, data);
	r = reg | (phy << NFE_PHYADD_SHIFT) | NFE_PHY_WRITE;
	NFE_WRITE(sc, NFE_PHY_CTL, r);
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

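	/* only the low nine status bits are of interest */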
	r = NFE_READ(sc, NFE_IRQ_STATUS) & 0x1ff;

	if (r == 0) {
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED | NFE_IRQ_TIMER);
		return (0);
	}

	if (r & NFE_IRQ_RX)
		nfe_rxintr(sc);

	if (r & NFE_IRQ_TX_DONE)
		nfe_txintr(sc);

	DPRINTF(("nfe_intr: interrupt register %x\n", r));

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED | NFE_IRQ_TIMER);

	return (1);
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			nfe_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			nfe_init(ifp);
			break;
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (cmd == SIOCADDMULTI)
			error = ether_addmulti(ifr, &sc->sc_arpcom);
		else
			error = ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_update_promisc(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;
	default:
		error = EINVAL;
	}
	splx(s);

	return (error);
}

void
nfe_start(struct ifnet *ifp)
{
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

void
nfe_watchdog(struct ifnet *ifp)
{
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc	*sc = ifp->if_softc;
	int r;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
	delay(10);

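	/* pick the RXTX magic matching the chip's descriptor format */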
	r = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		r |= NFE_RXTX_V3MAGIC|NFE_RXTX_RXCHECK;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		r |= NFE_RXTX_V2MAGIC|NFE_RXTX_RXCHECK;

	NFE_WRITE(sc, NFE_RXTX_CTL, r);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* XXX set MAC address */

	/* Tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr);
	NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_TIMER_INT, 970);		/* XXX Magic */

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	nfe_setmulti(sc);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_RXERR | NFE_IRQ_RX |
	    NFE_IRQ_RX_NOBUF | NFE_IRQ_TXERR | NFE_IRQ_TX_DONE | NFE_IRQ_LINK |
	    NFE_IRQ_TXERR2);

	mii_mediachg(&sc->sc_mii);

	timeout_set(&sc->sc_timeout, nfe_tick, sc);
	/* start the MII tick; nfe_tick() reschedules itself every hz */
	timeout_add(&sc->sc_timeout, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
nfe_reset(struct nfe_softc *sc)
{
	printf("nfe_reset!\n");
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring, int count)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct nfe_rx_data *data;
	struct nfe_desc *desc_v1;
	struct nfe_desc_v3 *desc_v3;
	void **desc;
	int i, nsegs, error, descsize;
	int bufsz = ifp->if_mtu + 64;

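	/* 40-bit chips use the larger v3 descriptor format */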
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = (void **)&ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	ring->count = count;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, count * descsize, 1,
	    count * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, count * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    count * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    count * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	memset(*desc, 0, count * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	ring->data = malloc(count * sizeof (struct nfe_rx_data), M_DEVBUF,
	    M_NOWAIT);
	if (ring->data == NULL) {
		printf("%s: could not allocate soft data\n",
		    sc->sc_dev.dv_xname);
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	memset(ring->data, 0, count * sizeof (struct nfe_rx_data));
	for (i = 0; i < count; i++) {
		data = &ring->data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc_v3 = &ring->desc_v3[i];
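			/*
			 * physaddr[0] would hold the high 32 bits of
			 * the 40-bit DMA address (see the disabled
			 * code below); it is left zero for now.
			 */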
			desc_v3->physaddr[0] =
#if 0
			(htole64(data->map->dm_segs->ds_addr) >> 32) & 0xffffffff;
#endif
			    0;
			desc_v3->physaddr[1] =
			    htole64(data->map->dm_segs->ds_addr) & 0xffffffff;
			desc_v3->flags = htole32(bufsz | NFE_RX_READY);
		} else {
			desc_v1 = &ring->desc_v1[i];
			desc_v1->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc_v1->flags = htole32(bufsz | NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return (0);

fail:	nfe_free_rx_ring(sc, ring);
	return (error);
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	int i, bufsz = ifp->if_mtu + 64;

	for (i = 0; i < ring->count; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc_v3[i].flags =
			    htole32(bufsz | NFE_RX_READY);
		} else {
			ring->desc_v1[i].flags =
			    htole32(bufsz | NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    ring->count * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	if (ring->data != NULL) {
		for (i = 0; i < ring->count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    data->map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat, data->map);
				m_freem(data->m);
			}

			if (data->map != NULL)
				bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		free(ring->data, M_DEVBUF);
	}
}

int
nfe_rxintr(struct nfe_softc *sc)
{
	printf("nfe_rxintr!\n");
	return (0);
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring, int count)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

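	/* as with the Rx ring, 40-bit chips use the v3 descriptor format */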
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = (void **)&ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	ring->count = count;
	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, count * descsize, 1,
	    count * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, count * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    count * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    count * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* zero the descriptors themselves, not the pointer to them */
	memset(*desc, 0, count * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	ring->data = malloc(count * sizeof(struct nfe_tx_data), M_DEVBUF,
	    M_NOWAIT);
	if (ring->data == NULL) {
		printf("%s: could not allocate soft data\n",
		    sc->sc_dev.dv_xname);
		error = ENOMEM;
		goto fail;
	}

	memset(ring->data, 0, count * sizeof (struct nfe_tx_data));
	for (i = 0; i < count; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return (0);

fail:	nfe_free_tx_ring(sc, ring);
	return (error);
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	void *desc;
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < ring->count; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = &ring->desc_v3[i];
		else
			desc = &ring->desc_v1[i];

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR)
			((struct nfe_desc_v3 *)desc)->flags = 0;
		else
			((struct nfe_desc *)desc)->flags = 0;
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    ring->count * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	if (ring->data != NULL) {
		for (i = 0; i < ring->count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    data->map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->sc_dmat, data->map);
				m_freem(data->m);
			}

			if (data->map != NULL)
				bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		free(ring->data, M_DEVBUF);
	}
}

int
nfe_txintr(struct nfe_softc *sc)
{
	printf("nfe_txintr!\n");
	return (0);
}

int
nfe_mediachange(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data	*mii = &sc->sc_mii;
	int val;

	DPRINTF(("nfe_mediachange\n"));
#if 0
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		/* XXX? */
	else
#endif
		val = 0;

	val |= NFE_MEDIA_SET;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		val |= NFE_MEDIA_1000T;
		break;
	case IFM_100_TX:
		val |= NFE_MEDIA_100TX;
		break;
	case IFM_10_T:
		val |= NFE_MEDIA_10T;
		break;
	}

	DPRINTF(("nfe_mediachange: val=0x%x\n", val));
	NFE_WRITE(sc, NFE_LINKSPEED, val);
	return (0);
}

void
nfe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

void
nfe_setmulti(struct nfe_softc *sc)
{
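	/* XXX accept all multicast for now; real filtering is "notyet" */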
	NFE_WRITE(sc, NFE_MULT_ADDR1, 0x01);
	NFE_WRITE(sc, NFE_MULT_ADDR2, 0);
	NFE_WRITE(sc, NFE_MULT_MASK1, 0);
	NFE_WRITE(sc, NFE_MULT_MASK2, 0);
#ifdef notyet
	NFE_WRITE(sc, NFE_MULTI_FLAGS, NFE_MC_ALWAYS | NFE_MC_MYADDR);
#else
	NFE_WRITE(sc, NFE_MULTI_FLAGS, NFE_MC_ALWAYS | NFE_MC_PROMISC);
#endif
}

void
nfe_update_promisc(struct nfe_softc *sc)
{
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_timeout, hz);
}