/*	$OpenBSD: if_nfe.c,v 1.120 2017/09/08 05:36:52 deraadt Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
int	nfe_activate(struct device *, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_iff(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
#ifndef SMALL_KERNEL
int	nfe_wol(struct ifnet*, int);
#endif

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
	nfe_activate
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

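/*
 * Handle suspend/resume: quiesce the interface before suspend and
 * reinitialize it on resume if it was configured up.
 */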
int
nfe_activate(struct device *self, int act)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_stop(ifp, 0);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			nfe_init(ifp);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}


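/*
 * Map registers, establish the interrupt, detect per-chip features,
 * allocate the Tx/Rx rings and attach the ethernet interface and PHY.
 */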
void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = nfe_wol;
	nfe_wol(ifp, 0);
#endif

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
}

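/*
 * Propagate the duplex and speed negotiated by the PHY into the MAC's
 * PHY interface, link speed and random seed registers.
 */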
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy  |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

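/*
 * Read a PHY register through the MAC's MII management interface.
 * Returns 0 on timeout or error.
 */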
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

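/* Write a PHY register through the MAC's MII management interface. */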
void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

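/*
 * Interrupt handler: acknowledge the interrupt causes, note link
 * changes and process the Rx and Tx rings.
 */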
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

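/*
 * Handle interface ioctls at splnet; address, flag and media changes
 * that need a filter update fall through to nfe_iff().
 */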
int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}

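/*
 * Helpers to bus_dmamap_sync() individual 32-bit/64-bit descriptors,
 * or (for the "rsync" variants) a range of Tx descriptors, taking
 * ring wrap-around into account.
 */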
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

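/*
 * Receive completion: for each descriptor handed back by the chip,
 * replace its mbuf with a freshly loaded one, pass the received packet
 * to the network stack and return the descriptor to the chip.
 */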
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		mnew = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		mnew->m_pkthdr.len = mnew->m_len = MCLBYTES;

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mnew,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map,
			    data->m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail.. */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->map->dm_segs[0].ds_addr;

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
	if_input(ifp, &ml);
}

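/*
 * Transmit completion: reclaim descriptors the chip has finished with,
 * unload and free the transmitted mbufs and restart the send queue if
 * at least one slot was freed.
 */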
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			}
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifq_clr_oactive(&ifp->if_snd);
		nfe_start(ifp);
	}
}

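/*
 * Map an outgoing mbuf chain and fill one Tx descriptor per DMA
 * segment; the valid bit of the first descriptor is set last so the
 * chip never sees a partially built chain.
 */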
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first fragment
			 * only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

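/*
 * Dequeue packets from the send queue, encapsulate them and kick the
 * transmitter once at least one packet has been queued.
 */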
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		m0 = ifq_deq_begin(&ifp->if_snd);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifq_deq_rollback(&ifp->if_snd, m0);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		ifq_deq_commit(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}

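/*
 * Reset and (re)program the chip: descriptor ring addresses and sizes,
 * MAC address, VLAN/checksum offload, interrupt mitigation and the Rx
 * filter, then enable the receiver, transmitter and interrupts.
 */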
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* program promiscuous mode and multicast filters */
	nfe_iff(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}

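/*
 * Stop the interface. The receiver and interrupts are left enabled
 * when Wake-on-LAN is active so that wakeup frames can still arrive.
 */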
void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	if ((sc->sc_flags & NFE_WOL) == 0) {
		/* disable Rx */
		NFE_WRITE(sc, NFE_RX_CTL, 0);

		/* disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	}

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

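/*
 * Allocate the Rx descriptor ring (32-bit or 64-bit descriptors
 * depending on the chip), pre-allocate an mbuf cluster for every slot
 * and mark all descriptors ready for the chip.
 */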
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		data->m = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}
		data->m->m_pkthdr.len = data->m->m_len = MCLBYTES;

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, data->m,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
		physaddr = data->map->dm_segs[0].ds_addr;

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		m_freem(data->m);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

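/*
 * Program the Rx filter: promiscuous/all-multicast modes, or a single
 * address/mask pair computed from the multicast list.
 */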
void
nfe_iff(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter;
	int i;

	filter = NFE_RXFILTER_MAGIC;
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			filter |= NFE_PROMISC;
		else
			filter |= NFE_U2M;
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
	} else {
		filter |= NFE_U2M;

		bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
		bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				addr[i] &=  enm->enm_addrlo[i];
				mask[i] &= ~enm->enm_addrlo[i];
			}

			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < ETHER_ADDR_LEN; i++)
			mask[i] |= addr[i];
	}

	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

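/*
 * Read the MAC address out of the chip's MACADDR registers; the byte
 * order differs between older and newer (NFE_CORRECT_MACADDR) chips.
 */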
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >>  8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >>  8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

#ifndef SMALL_KERNEL
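/* Enable or disable Wake-on-LAN in the chip and remember the setting. */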
int
nfe_wol(struct ifnet *ifp, int enable)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (enable) {
		sc->sc_flags |= NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);
	} else {
		sc->sc_flags &= ~NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, 0);
	}

	return 0;
}
#endif
