/*	$OpenBSD: if_nfe.c,v 1.4 2005/12/17 11:12:54 jsg Exp $	*/
/*
 * Copyright (c) 2005 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce Ethernet controllers */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);

int	nfe_intr(void *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_rxintr(struct nfe_softc *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_txintr(struct nfe_softc *);

int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_start(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_reset(struct nfe_softc *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_update_promisc(struct nfe_softc *);
void	nfe_tick(void *);

int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_mediachange(struct ifnet *);
void	nfe_mediastatus(struct ifnet *, struct ifmediareq *);

struct cfattach nfe_ca = {
	sizeof(struct nfe_softc),
	nfe_match,
	nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int	nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof(nfe_devices) / sizeof(nfe_devices[0])));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc	*sc = (struct nfe_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct ifnet		*ifp;
	bus_size_t		iosize;
	pcireg_t		command;
	int			i;

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;

	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if ((command & PCI_COMMAND_MEM_ENABLE) == 0) {
		printf(": mem space not enabled\n");
		return;
	}

	if (pci_mapreg_map(pa, NFE_PCI_BA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &iosize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

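	/* read the MAC address out of the two MACADDR registers */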
	i = betoh16(NFE_READ(sc, NFE_MACADDR_LO) & 0xffff);
	memcpy((char *)sc->sc_arpcom.ac_enaddr, &i, 2);
	i = betoh32(NFE_READ(sc, NFE_MACADDR_HI));
	memcpy(&(sc->sc_arpcom.ac_enaddr[2]), &i, 4);

	printf(", address %s\n",
	    ether_sprintf(sc->sc_arpcom.ac_enaddr));

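	/* determine per-chip features: jumbo frames, 40-bit DMA addressing */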
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	default:
		sc->sc_flags = 0;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		goto fail1;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		goto fail2;
	}

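	/* tell the MAC how many descriptors each ring holds */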
	NFE_WRITE(sc, NFE_RING_SIZE, NFE_RX_RING_COUNT << 16 |
	    NFE_TX_RING_COUNT);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = 1000000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/* Set interface name */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	/* XXX always seem to get a ghost ukphy along with eephy on nf4u */
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    nfe_mediachange, nfe_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* XXX powerhook */
	return;

fail2:
	nfe_free_tx_ring(sc, &sc->txq);
fail1:
	/* the ring that failed to allocate has already cleaned itself up */
	return;
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data	*mii = &sc->sc_mii;
	uint32_t reg;

	reg = NFE_READ(sc, NFE_PHY_INT);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		reg |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		reg |= NFE_PHY_100TX;
		break;
	}
	NFE_WRITE(sc, NFE_PHY_INT, reg);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t r;

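	/* abort any transaction still in progress */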
	r = NFE_READ(sc, NFE_PHY_CTL);
	if (r & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		delay(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, reg | (phy << NFE_PHYADD_SHIFT));
	delay(1000);
	r = NFE_READ(sc, NFE_PHY_DATA);
	if (r != 0xffffffff && r != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("nfe mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, r));

	return (r);
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t r;

	r = NFE_READ(sc, NFE_PHY_CTL);
	if (r & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		delay(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, data);
	r = reg | (phy << NFE_PHYADD_SHIFT) | NFE_PHY_WRITE;
	NFE_WRITE(sc, NFE_PHY_CTL, r);
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

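	/* fetch the pending interrupt bits */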
	r = NFE_READ(sc, NFE_IRQ_STATUS) & 0x1ff;

	if (r == 0) {
		/* not for us; re-enable interrupts and bail */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED | NFE_IRQ_TIMER);
		return (0);
	}

	if (r & NFE_IRQ_RX)
		nfe_rxintr(sc);

	if (r & NFE_IRQ_TX_DONE)
		nfe_txintr(sc);

	DPRINTF(("nfe_intr: interrupt register %x\n", r));

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED | NFE_IRQ_TIMER);

	return (1);
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFMTU:
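		/* XXX jumbo frames not hooked up yet, so cap at ETHERMTU */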
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			nfe_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			nfe_init(ifp);
			break;
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (cmd == SIOCADDMULTI)
			error = ether_addmulti(ifr, &sc->sc_arpcom);
		else
			error = ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_update_promisc(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;
	default:
		error = EINVAL;
	}
	splx(s);

	return (error);
}

void
nfe_start(struct ifnet *ifp)
{
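	/* XXX Tx path not implemented yet */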
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

void
nfe_watchdog(struct ifnet *ifp)
{
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc	*sc = ifp->if_softc;
	int r;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
	delay(10);

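	/* select the descriptor version matching the chip generation */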
	r = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		r |= NFE_RXTX_V3MAGIC|NFE_RXTX_RXCHECK;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		r |= NFE_RXTX_V2MAGIC|NFE_RXTX_RXCHECK;

	NFE_WRITE(sc, NFE_RXTX_CTL, r);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* XXX set MAC address */

	/* Tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr);
	NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_TIMER_INT, 970);		/* XXX Magic */

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	nfe_setmulti(sc);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_RXERR | NFE_IRQ_RX |
	    NFE_IRQ_RX_NOBUF | NFE_IRQ_TXERR | NFE_IRQ_TX_DONE | NFE_IRQ_LINK |
	    NFE_IRQ_TXERR2);

	mii_mediachg(&sc->sc_mii);

	/* start the one second MII tick; nfe_tick() reschedules itself */
	timeout_set(&sc->sc_timeout, nfe_tick, sc);
	timeout_add(&sc->sc_timeout, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
nfe_reset(struct nfe_softc *sc)
{
	printf("nfe_reset!\n");
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	struct nfe_desc *desc_v1;
	struct nfe_desc_v3 *desc_v3;
	void **desc;
	int i, nsegs, error, descsize;

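	/* 40-bit parts use the larger v3 descriptor format */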
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = (void **)&ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	memset(*desc, 0, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc_v3 = &sc->rxq.desc_v3[i];
#if 0
			desc_v3->physaddr[0] =
			    (htole64(data->map->dm_segs->ds_addr) >> 32) &
			    0xffffffff;
#endif
			/* upper 32 bits of the bus address, unused for now */
			desc_v3->physaddr[0] = 0;
			desc_v3->physaddr[1] =
			    htole64(data->map->dm_segs->ds_addr) & 0xffffffff;

			desc_v3->length = htole16(MCLBYTES);
			desc_v3->flags = htole16(NFE_RX_READY);
		} else {
			desc_v1 = &sc->rxq.desc_v1[i];
			desc_v1->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc_v1->length = htole16(MCLBYTES);
			desc_v1->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc_v3[i].length = htole16(MCLBYTES);
			ring->desc_v3[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc_v1[i].length = htole16(MCLBYTES);
			ring->desc_v1[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_rxintr(struct nfe_softc *sc)
{
	printf("nfe_rxintr!\n");
	return (0);
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = (void **)&ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	memset(*desc, 0, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

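	/* one DMA map per Tx slot; Tx mbufs will be mapped when queued */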
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	void *desc;
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = &ring->desc_v3[i];
		else
			desc = &ring->desc_v1[i];

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR)
			((struct nfe_desc_v3 *)desc)->flags = 0;
		else
			((struct nfe_desc *)desc)->flags = 0;
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_txintr(struct nfe_softc *sc)
{
	printf("nfe_txintr!\n");
	return (0);
}

int
nfe_mediachange(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data	*mii = &sc->sc_mii;
	int val;

	DPRINTF(("nfe_mediachange\n"));
#if 0
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		/* XXX? */
	else
#endif
		val = 0;

	val |= NFE_MEDIA_SET;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		val |= NFE_MEDIA_1000T;
		break;
	case IFM_100_TX:
		val |= NFE_MEDIA_100TX;
		break;
	case IFM_10_T:
		val |= NFE_MEDIA_10T;
		break;
	}

	DPRINTF(("nfe_mediachange: val=0x%x\n", val));
	NFE_WRITE(sc, NFE_LINKSPEED, val);
	return (0);
}

void
nfe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

void
nfe_setmulti(struct nfe_softc *sc)
{
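	/* XXX no real multicast filter yet; accept all multicast frames */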
	NFE_WRITE(sc, NFE_MULT_ADDR1, 0x01);
	NFE_WRITE(sc, NFE_MULT_ADDR2, 0);
	NFE_WRITE(sc, NFE_MULT_MASK1, 0);
	NFE_WRITE(sc, NFE_MULT_MASK2, 0);
#ifdef notyet
	NFE_WRITE(sc, NFE_MULTI_FLAGS, NFE_MC_ALWAYS | NFE_MC_MYADDR);
#else
	NFE_WRITE(sc, NFE_MULTI_FLAGS, NFE_MC_ALWAYS | NFE_MC_PROMISC);
#endif
}

void
nfe_update_promisc(struct nfe_softc *sc)
{
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_timeout, hz);
}
964