/*	$OpenBSD: if_nfe.c,v 1.3 2005/12/17 09:03:14 jsg Exp $	*/
/*
 * Copyright (c) 2005 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);

int	nfe_intr(void *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *, int);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_rxintr(struct nfe_softc *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *, int);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_txintr(struct nfe_softc *);

int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_start(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_reset(struct nfe_softc *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_update_promisc(struct nfe_softc *);
void	nfe_tick(void *);

int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_mediachange(struct ifnet *);
void	nfe_mediastatus(struct ifnet *, struct ifmediareq *);

struct cfattach nfe_ca = {
	sizeof(struct nfe_softc),
	nfe_match,
	nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int	nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof(nfe_devices)/sizeof(nfe_devices[0])));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc	*sc = (struct nfe_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct ifnet		*ifp;
	bus_size_t		iosize;
	pcireg_t		command;
	int			i;

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;

	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if ((command & PCI_COMMAND_MEM_ENABLE) == 0) {
		printf(": mem space not enabled\n");
		return;
	}

	if (pci_mapreg_map(pa, NFE_PCI_BA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &iosize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

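	/*
	 * Read the MAC address from the two station address registers,
	 * low word first.  XXX this assumes the registers hold the
	 * address in the byte order the betoh conversions expect.
	 */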
	i = betoh16(NFE_READ(sc, NFE_MACADDR_LO) & 0xffff);
	memcpy((char *)sc->sc_arpcom.ac_enaddr, &i, 2);
	i = betoh32(NFE_READ(sc, NFE_MACADDR_HI));
	memcpy(&(sc->sc_arpcom.ac_enaddr[2]), &i, 4);

	printf(", address %s\n",
	    ether_sprintf(sc->sc_arpcom.ac_enaddr));

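	/*
	 * Newer chips can do jumbo frames; CK804 and later parts also
	 * support 40-bit DMA addressing and use the V3 descriptor format.
	 */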
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	default:
		sc->sc_flags = 0;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq, NFE_TX_RING_COUNT) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq, NFE_RX_RING_COUNT) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

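	/* ring sizes: Rx count in the high 16 bits, Tx count in the low */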
	NFE_WRITE(sc, NFE_RING_SIZE, sc->rxq.count << 16 | sc->txq.count);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = 1000000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/* Set interface name */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	/* XXX always seem to get a ghost ukphy along with eephy on nf4u */
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    nfe_mediachange, nfe_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* XXX powerhook */
	return;

fail:
	nfe_free_tx_ring(sc, &sc->txq);
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *) dev;
	struct mii_data	*mii = &sc->sc_mii;
	uint32_t reg;

	reg = NFE_READ(sc, NFE_PHY_INT);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		reg |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		reg |= NFE_PHY_100TX;
		break;
	}
	NFE_WRITE(sc, NFE_PHY_INT, reg);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *) dev;
	uint32_t r;

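	/*
	 * If the PHY is still busy from a previous transaction, write
	 * the busy bit back to abort it before starting a new cycle.
	 */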
	r = NFE_READ(sc, NFE_PHY_CTL);
	if (r & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		delay(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, reg | (phy << NFE_PHYADD_SHIFT));
	delay(1000);
	r = NFE_READ(sc, NFE_PHY_DATA);
	if (r != 0xffffffff && r != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("nfe mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, r));

	return (r);
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct nfe_softc *sc = (struct nfe_softc *) dev;
	uint32_t r;

	r = NFE_READ(sc, NFE_PHY_CTL);
	if (r & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		delay(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, data);
	r = reg | (phy << NFE_PHYADD_SHIFT) | NFE_PHY_WRITE;
	NFE_WRITE(sc, NFE_PHY_CTL, r);
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

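	/* keep only the interrupt source bits of the status register */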
	r = NFE_READ(sc, NFE_IRQ_STATUS) & 0x1ff;

	if (r == 0) {
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED | NFE_IRQ_TIMER);
		return (0);
	}

	if (r & NFE_IRQ_RX)
		nfe_rxintr(sc);

	if (r & NFE_IRQ_TX_DONE)
		nfe_txintr(sc);

	DPRINTF(("nfe_intr: interrupt register %x\n", r));

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED | NFE_IRQ_TIMER);

	return (1);
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *) data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			nfe_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			nfe_init(ifp);
			break;
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (cmd == SIOCADDMULTI)
			error = ether_addmulti(ifr, &sc->sc_arpcom);
		else
			error = ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_update_promisc(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;
	default:
		error = EINVAL;
	}
	splx(s);

	return (error);
}

void
nfe_start(struct ifnet *ifp)
{
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

void
nfe_watchdog(struct ifnet *ifp)
{
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc	*sc = ifp->if_softc;
	int r;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
	delay(10);

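	/*
	 * Select the descriptor format matching the chip: V3 for
	 * 40-bit parts, V2 for jumbo-capable parts, the original
	 * format otherwise.
	 */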
	r = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		r |= NFE_RXTX_V3MAGIC|NFE_RXTX_RXCHECK;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		r |= NFE_RXTX_V2MAGIC|NFE_RXTX_RXCHECK;

	NFE_WRITE(sc, NFE_RXTX_CTL, r);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* XXX set MAC address */

	/* Tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr);
	NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_TIMER_INT, 970);		/* XXX Magic */

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	nfe_setmulti(sc);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_RXERR | NFE_IRQ_RX |
	    NFE_IRQ_RX_NOBUF | NFE_IRQ_TXERR | NFE_IRQ_TX_DONE | NFE_IRQ_LINK |
	    NFE_IRQ_TXERR2);

	mii_mediachg(&sc->sc_mii);

	timeout_set(&sc->sc_timeout, nfe_tick, sc);
	timeout_add(&sc->sc_timeout, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
nfe_reset(struct nfe_softc *sc)
{
	printf("nfe_reset!\n");
}

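/*
 * Allocate the Rx ring: one DMA-safe block of descriptors plus an
 * mbuf cluster per slot, each loaded into its own DMA map and marked
 * ready for the chip.
 */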
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring, int count)
{
	struct nfe_rx_data *data;
	struct nfe_desc *desc_v1;
	struct nfe_desc_v3 *desc_v3;
	void **desc;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = (void **)&ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	ring->count = count;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, count * descsize, 1,
	    count * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, count * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    count * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    count * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	memset(*desc, 0, count * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	ring->data = malloc(count * sizeof (struct nfe_rx_data), M_DEVBUF,
	    M_NOWAIT);
	if (ring->data == NULL) {
		printf("%s: could not allocate soft data\n",
		    sc->sc_dev.dv_xname);
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	memset(ring->data, 0, count * sizeof (struct nfe_rx_data));
	for (i = 0; i < count; i++) {
		data = &ring->data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

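		/*
		 * Fill the descriptor.  V3 descriptors split the buffer
		 * physical address into high and low 32-bit halves; the
		 * high half is left zero here.
		 */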
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc_v3 = &ring->desc_v3[i];
			desc_v3->physaddr[0] =
#if 0
			(htole64(data->map->dm_segs->ds_addr) >> 32) & 0xffffffff;
#endif
			    0;
			desc_v3->physaddr[1] =
			    htole64(data->map->dm_segs->ds_addr) & 0xffffffff;

			desc_v3->length = htole16(MCLBYTES);
			desc_v3->flags = htole16(NFE_RX_READY);
		} else {
			desc_v1 = &ring->desc_v1[i];
			desc_v1->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc_v1->length = htole16(MCLBYTES);
			desc_v1->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < ring->count; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc_v3[i].length = htole16(MCLBYTES);
			ring->desc_v3[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc_v1[i].length = htole16(MCLBYTES);
			ring->desc_v1[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    ring->count * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	if (ring->data != NULL) {
		for (i = 0; i < ring->count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    data->map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat, data->map);
				m_freem(data->m);
			}

			if (data->map != NULL)
				bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		free(ring->data, M_DEVBUF);
	}
}

int
nfe_rxintr(struct nfe_softc *sc)
{
	printf("nfe_rxintr!\n");
	return (0);
}

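/*
 * Allocate the Tx ring: one block of descriptors plus per-slot DMA
 * maps; mbufs are attached at transmit time.
 */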
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring, int count)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = (void **)&ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	ring->count = count;
	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, count * descsize, 1,
	    count * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, count * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    count * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    count * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	memset(*desc, 0, count * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	ring->data = malloc(count * sizeof(struct nfe_tx_data), M_DEVBUF,
	    M_NOWAIT);
	if (ring->data == NULL) {
		printf("%s: could not allocate soft data\n",
		    sc->sc_dev.dv_xname);
		error = ENOMEM;
		goto fail;
	}

	memset(ring->data, 0, count * sizeof (struct nfe_tx_data));
	for (i = 0; i < count; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	void *desc;
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < ring->count; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = &ring->desc_v3[i];
		else
			desc = &ring->desc_v1[i];

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR)
			((struct nfe_desc_v3 *)desc)->flags = 0;
		else
			((struct nfe_desc *)desc)->flags = 0;
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc_v3;
		descsize = sizeof(struct nfe_desc_v3);
	} else {
		desc = ring->desc_v1;
		descsize = sizeof(struct nfe_desc);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    ring->count * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	if (ring->data != NULL) {
		for (i = 0; i < ring->count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    data->map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->sc_dmat, data->map);
				m_freem(data->m);
			}

			if (data->map != NULL)
				bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		free(ring->data, M_DEVBUF);
	}
}

int
nfe_txintr(struct nfe_softc *sc)
{
	printf("nfe_txintr!\n");
	return (0);
}

int
nfe_mediachange(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data	*mii = &sc->sc_mii;
	int val;

	DPRINTF(("nfe_mediachange\n"));
#if 0
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		/* XXX? */
	else
#endif
		val = 0;

	val |= NFE_MEDIA_SET;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		val |= NFE_MEDIA_1000T;
		break;
	case IFM_100_TX:
		val |= NFE_MEDIA_100TX;
		break;
	case IFM_10_T:
		val |= NFE_MEDIA_10T;
		break;
	}

	DPRINTF(("nfe_mediachange: val=0x%x\n", val));
	NFE_WRITE(sc, NFE_LINKSPEED, val);
	return (0);
}

void
nfe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

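/*
 * XXX no real multicast filtering yet: program the filter to accept
 * all multicast and run promiscuous (see the `notyet' case below).
 */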
void
nfe_setmulti(struct nfe_softc *sc)
{
	NFE_WRITE(sc, NFE_MULT_ADDR1, 0x01);
	NFE_WRITE(sc, NFE_MULT_ADDR2, 0);
	NFE_WRITE(sc, NFE_MULT_MASK1, 0);
	NFE_WRITE(sc, NFE_MULT_MASK2, 0);
#ifdef notyet
	NFE_WRITE(sc, NFE_MULTI_FLAGS, NFE_MC_ALWAYS | NFE_MC_MYADDR);
#else
	NFE_WRITE(sc, NFE_MULTI_FLAGS, NFE_MC_ALWAYS | NFE_MC_PROMISC);
#endif
}

void
nfe_update_promisc(struct nfe_softc *sc)
{
}

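/*
 * One-second timer: poll the MII for link state changes and
 * reschedule ourselves.
 */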
void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_timeout, hz);
}