1/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/
2
3/*-
4 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
22
23#include <sys/cdefs.h>
24__FBSDID("$FreeBSD: head/sys/dev/nfe/if_nfe.c 243857 2012-12-04 09:32:43Z glebius $");
25
26#ifdef HAVE_KERNEL_OPTION_HEADERS
27#include "opt_device_polling.h"
28#endif
29
30#include <sys/param.h>
31#include <sys/endian.h>
32#include <sys/systm.h>
33#include <sys/sockio.h>
34#include <sys/mbuf.h>
35#include <sys/malloc.h>
36#include <sys/module.h>
37#include <sys/kernel.h>
38#include <sys/queue.h>
39#include <sys/socket.h>
40#include <sys/sysctl.h>
41#include <sys/taskqueue.h>
42
43#include <net/if.h>
44#include <net/if_arp.h>
45#include <net/ethernet.h>
46#include <net/if_dl.h>
47#include <net/if_media.h>
48#include <net/if_types.h>
49#include <net/if_vlan_var.h>
50
51#include <net/bpf.h>
52
53#include <machine/bus.h>
54#include <machine/resource.h>
55#include <sys/bus.h>
56#include <sys/rman.h>
57
58#include <dev/mii/mii.h>
59#include <dev/mii/miivar.h>
60
61#include <dev/pci/pcireg.h>
62#include <dev/pci/pcivar.h>
63
64#include <dev/nfe/if_nfereg.h>
65#include <dev/nfe/if_nfevar.h>
66
67MODULE_DEPEND(nfe, pci, 1, 1, 1);
68MODULE_DEPEND(nfe, ether, 1, 1, 1);
69MODULE_DEPEND(nfe, miibus, 1, 1, 1);
70
71/* "device miibus" required.  See GENERIC if you get errors here. */
72#include "miibus_if.h"
73
74static int  nfe_probe(device_t);
75static int  nfe_attach(device_t);
76static int  nfe_detach(device_t);
77static int  nfe_suspend(device_t);
78static int  nfe_resume(device_t);
79static int nfe_shutdown(device_t);
80static int  nfe_can_use_msix(struct nfe_softc *);
81static void nfe_power(struct nfe_softc *);
82static int  nfe_miibus_readreg(device_t, int, int);
83static int  nfe_miibus_writereg(device_t, int, int, int);
84static void nfe_miibus_statchg(device_t);
85static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
86static void nfe_set_intr(struct nfe_softc *);
87static __inline void nfe_enable_intr(struct nfe_softc *);
88static __inline void nfe_disable_intr(struct nfe_softc *);
89static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
90static void nfe_alloc_msix(struct nfe_softc *, int);
91static int nfe_intr(void *);
92static void nfe_int_task(void *, int);
93static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
94static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
95static int nfe_newbuf(struct nfe_softc *, int);
96static int nfe_jnewbuf(struct nfe_softc *, int);
97static int  nfe_rxeof(struct nfe_softc *, int, int *);
98static int  nfe_jrxeof(struct nfe_softc *, int, int *);
99static void nfe_txeof(struct nfe_softc *);
100static int  nfe_encap(struct nfe_softc *, struct mbuf **);
101static void nfe_setmulti(struct nfe_softc *);
102static void nfe_start(struct ifnet *);
103static void nfe_start_locked(struct ifnet *);
104static void nfe_watchdog(struct ifnet *);
105static void nfe_init(void *);
106static void nfe_init_locked(void *);
107static void nfe_stop(struct ifnet *);
108static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
109static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
110static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
111static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
112static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
113static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
114static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
115static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
116static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
117static int  nfe_ifmedia_upd(struct ifnet *);
118static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
119static void nfe_tick(void *);
120static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
121static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
122static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
123
124static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
125static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
126static void nfe_sysctl_node(struct nfe_softc *);
127static void nfe_stats_clear(struct nfe_softc *);
128static void nfe_stats_update(struct nfe_softc *);
129static void nfe_set_linkspeed(struct nfe_softc *);
130static void nfe_set_wol(struct nfe_softc *);
131
132#ifdef NFE_DEBUG
133static int nfedebug = 0;
134#define	DPRINTF(sc, ...)	do {				\
135	if (nfedebug)						\
136		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
137} while (0)
138#define	DPRINTFN(sc, n, ...)	do {				\
139	if (nfedebug >= (n))					\
140		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
141} while (0)
142#else
143#define	DPRINTF(sc, ...)
144#define	DPRINTFN(sc, n, ...)
145#endif
146
147#define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
148#define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
149#define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
150
151/* Tunables. */
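/*
 * These are read via TUNABLE_INT() from the kernel environment, so they
 * can be set from loader.conf (e.g. hw.nfe.msi_disable=1) before the
 * driver attaches.
 */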
152static int msi_disable = 0;
153static int msix_disable = 0;
154static int jumbo_disable = 0;
155TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
156TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
157TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
158
159static device_method_t nfe_methods[] = {
160	/* Device interface */
161	DEVMETHOD(device_probe,		nfe_probe),
162	DEVMETHOD(device_attach,	nfe_attach),
163	DEVMETHOD(device_detach,	nfe_detach),
164	DEVMETHOD(device_suspend,	nfe_suspend),
165	DEVMETHOD(device_resume,	nfe_resume),
166	DEVMETHOD(device_shutdown,	nfe_shutdown),
167
168	/* MII interface */
169	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
170	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
171	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),
172
173	DEVMETHOD_END
174};
175
176static driver_t nfe_driver = {
177	"nfe",
178	nfe_methods,
179	sizeof(struct nfe_softc)
180};
181
182static devclass_t nfe_devclass;
183
184DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
185DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
186
187static struct nfe_type nfe_devs[] = {
188	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
189	    "NVIDIA nForce MCP Networking Adapter"},
190	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
191	    "NVIDIA nForce2 MCP2 Networking Adapter"},
192	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
193	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
194	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
195	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
196	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
197	    "NVIDIA nForce3 MCP3 Networking Adapter"},
198	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
199	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
200	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
201	    "NVIDIA nForce3 MCP7 Networking Adapter"},
202	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
203	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
204	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
205	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
206	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
207	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
208	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
209	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
210	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
211	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
212	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
213	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
214	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
215	    "NVIDIA nForce MCP55 Networking Adapter"},
216	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
217	    "NVIDIA nForce MCP55 Networking Adapter"},
218	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
219	    "NVIDIA nForce MCP61 Networking Adapter"},
220	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
221	    "NVIDIA nForce MCP61 Networking Adapter"},
222	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
223	    "NVIDIA nForce MCP61 Networking Adapter"},
224	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
225	    "NVIDIA nForce MCP61 Networking Adapter"},
226	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
227	    "NVIDIA nForce MCP65 Networking Adapter"},
228	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
229	    "NVIDIA nForce MCP65 Networking Adapter"},
230	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
231	    "NVIDIA nForce MCP65 Networking Adapter"},
232	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
233	    "NVIDIA nForce MCP65 Networking Adapter"},
234	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
235	    "NVIDIA nForce MCP67 Networking Adapter"},
236	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
237	    "NVIDIA nForce MCP67 Networking Adapter"},
238	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
239	    "NVIDIA nForce MCP67 Networking Adapter"},
240	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
241	    "NVIDIA nForce MCP67 Networking Adapter"},
242	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
243	    "NVIDIA nForce MCP73 Networking Adapter"},
244	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
245	    "NVIDIA nForce MCP73 Networking Adapter"},
246	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
247	    "NVIDIA nForce MCP73 Networking Adapter"},
248	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
249	    "NVIDIA nForce MCP73 Networking Adapter"},
250	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
251	    "NVIDIA nForce MCP77 Networking Adapter"},
252	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
253	    "NVIDIA nForce MCP77 Networking Adapter"},
254	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
255	    "NVIDIA nForce MCP77 Networking Adapter"},
256	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
257	    "NVIDIA nForce MCP77 Networking Adapter"},
258	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
259	    "NVIDIA nForce MCP79 Networking Adapter"},
260	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
261	    "NVIDIA nForce MCP79 Networking Adapter"},
262	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
263	    "NVIDIA nForce MCP79 Networking Adapter"},
264	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
265	    "NVIDIA nForce MCP79 Networking Adapter"},
266	{0, 0, NULL}
267};
268
269
270/* Probe for supported hardware IDs */
271static int
272nfe_probe(device_t dev)
273{
274	struct nfe_type *t;
275
276	t = nfe_devs;
277	/* Check for matching PCI device IDs */
278	while (t->name != NULL) {
279		if ((pci_get_vendor(dev) == t->vid_id) &&
280		    (pci_get_device(dev) == t->dev_id)) {
281			device_set_desc(dev, t->name);
282			return (BUS_PROBE_DEFAULT);
283		}
284		t++;
285	}
286
287	return (ENXIO);
288}
289
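/*
 * Try to set up MSI-X: allocate the MSI-X table (BAR 2) and Pending
 * Bit Array (BAR 3) resources and request NFE_MSI_MESSAGES vectors.
 * sc->nfe_msix is only set when the full set of messages is granted;
 * otherwise the caller falls back to MSI or INTx.
 */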
290static void
291nfe_alloc_msix(struct nfe_softc *sc, int count)
292{
293	int rid;
294
295	rid = PCIR_BAR(2);
296	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
297	    &rid, RF_ACTIVE);
298	if (sc->nfe_msix_res == NULL) {
299		device_printf(sc->nfe_dev,
300		    "couldn't allocate MSIX table resource\n");
301		return;
302	}
303	rid = PCIR_BAR(3);
304	sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
305	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
306	if (sc->nfe_msix_pba_res == NULL) {
307		device_printf(sc->nfe_dev,
308		    "couldn't allocate MSIX PBA resource\n");
309		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
310		    sc->nfe_msix_res);
311		sc->nfe_msix_res = NULL;
312		return;
313	}
314
315	if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
316		if (count == NFE_MSI_MESSAGES) {
317			if (bootverbose)
318				device_printf(sc->nfe_dev,
319				    "Using %d MSIX messages\n", count);
320			sc->nfe_msix = 1;
321		} else {
322			if (bootverbose)
323				device_printf(sc->nfe_dev,
324				    "couldn't allocate MSIX\n");
325			pci_release_msi(sc->nfe_dev);
326			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
327			    PCIR_BAR(3), sc->nfe_msix_pba_res);
328			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
329			    PCIR_BAR(2), sc->nfe_msix_res);
330			sc->nfe_msix_pba_res = NULL;
331			sc->nfe_msix_res = NULL;
332		}
333	}
334}
335
336static int
337nfe_attach(device_t dev)
338{
339	struct nfe_softc *sc;
340	struct ifnet *ifp;
341	bus_addr_t dma_addr_max;
342	int error = 0, i, msic, reg, rid;
343
344	sc = device_get_softc(dev);
345	sc->nfe_dev = dev;
346
347	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
348	    MTX_DEF);
349	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
350
351	pci_enable_busmaster(dev);
352
353	rid = PCIR_BAR(0);
354	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
355	    RF_ACTIVE);
356	if (sc->nfe_res[0] == NULL) {
357		device_printf(dev, "couldn't map memory resources\n");
358		mtx_destroy(&sc->nfe_mtx);
359		return (ENXIO);
360	}
361
362	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
363		uint16_t v, width;
364
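		/*
		 * reg + 0x08, 0x0c and 0x12 are the PCIe Device Control,
		 * Link Capabilities and Link Status registers; an MRRS
		 * encoding of 5 selects 4096-byte read requests.
		 */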
365		v = pci_read_config(dev, reg + 0x08, 2);
366		/* Change max. read request size to 4096. */
367		v &= ~(7 << 12);
368		v |= (5 << 12);
369		pci_write_config(dev, reg + 0x08, v, 2);
370
371		v = pci_read_config(dev, reg + 0x0c, 2);
372		/* link capability */
373		v = (v >> 4) & 0x0f;
374		width = pci_read_config(dev, reg + 0x12, 2);
375		/* negotiated link width */
376		width = (width >> 4) & 0x3f;
377		if (v != width)
378			device_printf(sc->nfe_dev,
379			    "warning, negotiated width of link(x%d) != "
380			    "max. width of link(x%d)\n", width, v);
381	}
382
383	if (nfe_can_use_msix(sc) == 0) {
384		device_printf(sc->nfe_dev,
385		    "MSI/MSI-X capability black-listed, will use INTx\n");
386		msix_disable = 1;
387		msi_disable = 1;
388	}
389
390	/* Allocate interrupt */
391	if (msix_disable == 0 || msi_disable == 0) {
392		if (msix_disable == 0 &&
393		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
394			nfe_alloc_msix(sc, msic);
395		if (msi_disable == 0 && sc->nfe_msix == 0 &&
396		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
397		    pci_alloc_msi(dev, &msic) == 0) {
398			if (msic == NFE_MSI_MESSAGES) {
399				if (bootverbose)
400					device_printf(dev,
401					    "Using %d MSI messages\n", msic);
402				sc->nfe_msi = 1;
403			} else
404				pci_release_msi(dev);
405		}
406	}
407
408	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
409		rid = 0;
410		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
411		    RF_SHAREABLE | RF_ACTIVE);
412		if (sc->nfe_irq[0] == NULL) {
413			device_printf(dev, "couldn't allocate IRQ resources\n");
414			error = ENXIO;
415			goto fail;
416		}
417	} else {
418		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
419			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
420			    SYS_RES_IRQ, &rid, RF_ACTIVE);
421			if (sc->nfe_irq[i] == NULL) {
422				device_printf(dev,
423				    "couldn't allocate IRQ resources for "
424				    "message %d\n", rid);
425				error = ENXIO;
426				goto fail;
427			}
428		}
429		/* Map interrupts to vector 0. */
430		if (sc->nfe_msix != 0) {
431			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
432			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
433		} else if (sc->nfe_msi != 0) {
434			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
435			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
436		}
437	}
438
439	/* Set IRQ status/mask register. */
440	sc->nfe_irq_status = NFE_IRQ_STATUS;
441	sc->nfe_irq_mask = NFE_IRQ_MASK;
442	sc->nfe_intrs = NFE_IRQ_WANTED;
443	sc->nfe_nointrs = 0;
444	if (sc->nfe_msix != 0) {
445		sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
446		sc->nfe_nointrs = NFE_IRQ_WANTED;
447	} else if (sc->nfe_msi != 0) {
448		sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
449		sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
450	}
451
452	sc->nfe_devid = pci_get_device(dev);
453	sc->nfe_revid = pci_get_revid(dev);
454	sc->nfe_flags = 0;
455
456	switch (sc->nfe_devid) {
457	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
458	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
459	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
460	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
461		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
462		break;
463	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
464	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
465		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
466		break;
467	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
468	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
469	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
470	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
471		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
472		    NFE_MIB_V1;
473		break;
474	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
475	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
476		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
477		    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
478		break;
479
480	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
481	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
482	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
483	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
484	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
485	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
486	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
487	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
488	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
489	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
490	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
491	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
492		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
493		    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
494		break;
495	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
496	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
497	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
498	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
499		/* XXX flow control */
500		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
501		    NFE_CORRECT_MACADDR | NFE_MIB_V3;
502		break;
503	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
504	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
505	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
506	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
507		/* XXX flow control */
508		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
509		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
510		break;
511	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
512	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
513	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
514	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
515		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
516		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
517		    NFE_MIB_V2;
518		break;
519	}
520
521	nfe_power(sc);
522	/* Check for reversed ethernet address */
523	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
524		sc->nfe_flags |= NFE_CORRECT_MACADDR;
525	nfe_get_macaddr(sc, sc->eaddr);
526	/*
527	 * Allocate the parent bus DMA tag appropriate for PCI.
528	 */
529	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
530	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
531		dma_addr_max = NFE_DMA_MAXADDR;
532	error = bus_dma_tag_create(
533	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
534	    1, 0,				/* alignment, boundary */
535	    dma_addr_max,			/* lowaddr */
536	    BUS_SPACE_MAXADDR,			/* highaddr */
537	    NULL, NULL,				/* filter, filterarg */
538	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
539	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
540	    0,					/* flags */
541	    NULL, NULL,				/* lockfunc, lockarg */
542	    &sc->nfe_parent_tag);
543	if (error)
544		goto fail;
545
546	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
547	if (ifp == NULL) {
548		device_printf(dev, "can not if_alloc()\n");
549		error = ENOSPC;
550		goto fail;
551	}
552
553	/*
554	 * Allocate Tx and Rx rings.
555	 */
556	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
557		goto fail;
558
559	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
560		goto fail;
561
562	nfe_alloc_jrx_ring(sc, &sc->jrxq);
563	/* Create sysctl node. */
564	nfe_sysctl_node(sc);
565
566	ifp->if_softc = sc;
567	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
568	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
569	ifp->if_ioctl = nfe_ioctl;
570	ifp->if_start = nfe_start;
571	ifp->if_hwassist = 0;
572	ifp->if_capabilities = 0;
573	ifp->if_init = nfe_init;
574	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
575	ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
576	IFQ_SET_READY(&ifp->if_snd);
577
578	if (sc->nfe_flags & NFE_HW_CSUM) {
579		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
580		ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
581	}
582	ifp->if_capenable = ifp->if_capabilities;
583
584	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
585	/* VLAN capability setup. */
586	ifp->if_capabilities |= IFCAP_VLAN_MTU;
587	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
588		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
589		if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
590			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM |
591			    IFCAP_VLAN_HWTSO;
592	}
593
594	if (pci_find_cap(dev, PCIY_PMG, &reg) == 0)
595		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
596	ifp->if_capenable = ifp->if_capabilities;
597
598	/*
599	 * Tell the upper layer(s) we support long frames.
600	 * Must appear after the call to ether_ifattach() because
601	 * ether_ifattach() sets ifi_hdrlen to the default value.
602	 */
603	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
604
605#ifdef DEVICE_POLLING
606	ifp->if_capabilities |= IFCAP_POLLING;
607#endif
608
609	/* Do MII setup */
610	error = mii_attach(dev, &sc->nfe_miibus, ifp, nfe_ifmedia_upd,
611	    nfe_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
612	    MIIF_DOPAUSE);
613	if (error != 0) {
614		device_printf(dev, "attaching PHYs failed\n");
615		goto fail;
616	}
617	ether_ifattach(ifp, sc->eaddr);
618
619	TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
620	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
621	    taskqueue_thread_enqueue, &sc->nfe_tq);
622	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
623	    device_get_nameunit(sc->nfe_dev));
624	error = 0;
625	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
626		error = bus_setup_intr(dev, sc->nfe_irq[0],
627		    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
628		    &sc->nfe_intrhand[0]);
629	} else {
630		for (i = 0; i < NFE_MSI_MESSAGES; i++) {
631			error = bus_setup_intr(dev, sc->nfe_irq[i],
632			    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
633			    &sc->nfe_intrhand[i]);
634			if (error != 0)
635				break;
636		}
637	}
638	if (error) {
639		device_printf(dev, "couldn't set up irq\n");
640		taskqueue_free(sc->nfe_tq);
641		sc->nfe_tq = NULL;
642		ether_ifdetach(ifp);
643		goto fail;
644	}
645
646fail:
647	if (error)
648		nfe_detach(dev);
649
650	return (error);
651}
652
653
654static int
655nfe_detach(device_t dev)
656{
657	struct nfe_softc *sc;
658	struct ifnet *ifp;
659	uint8_t eaddr[ETHER_ADDR_LEN];
660	int i, rid;
661
662	sc = device_get_softc(dev);
663	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
664	ifp = sc->nfe_ifp;
665
666#ifdef DEVICE_POLLING
667	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
668		ether_poll_deregister(ifp);
669#endif
670	if (device_is_attached(dev)) {
671		NFE_LOCK(sc);
672		nfe_stop(ifp);
673		ifp->if_flags &= ~IFF_UP;
674		NFE_UNLOCK(sc);
675		callout_drain(&sc->nfe_stat_ch);
676		ether_ifdetach(ifp);
677	}
678
679	if (ifp) {
680		/* restore ethernet address */
681		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
682			for (i = 0; i < ETHER_ADDR_LEN; i++) {
683				eaddr[i] = sc->eaddr[5 - i];
684			}
685		} else
686			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
687		nfe_set_macaddr(sc, eaddr);
688		if_free(ifp);
689	}
690	if (sc->nfe_miibus)
691		device_delete_child(dev, sc->nfe_miibus);
692	bus_generic_detach(dev);
693	if (sc->nfe_tq != NULL) {
694		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
695		taskqueue_free(sc->nfe_tq);
696		sc->nfe_tq = NULL;
697	}
698
699	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
700		if (sc->nfe_intrhand[i] != NULL) {
701			bus_teardown_intr(dev, sc->nfe_irq[i],
702			    sc->nfe_intrhand[i]);
703			sc->nfe_intrhand[i] = NULL;
704		}
705	}
706
707	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
708		if (sc->nfe_irq[0] != NULL)
709			bus_release_resource(dev, SYS_RES_IRQ, 0,
710			    sc->nfe_irq[0]);
711	} else {
712		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
713			if (sc->nfe_irq[i] != NULL) {
714				bus_release_resource(dev, SYS_RES_IRQ, rid,
715				    sc->nfe_irq[i]);
716				sc->nfe_irq[i] = NULL;
717			}
718		}
719		pci_release_msi(dev);
720	}
721	if (sc->nfe_msix_pba_res != NULL) {
722		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
723		    sc->nfe_msix_pba_res);
724		sc->nfe_msix_pba_res = NULL;
725	}
726	if (sc->nfe_msix_res != NULL) {
727		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
728		    sc->nfe_msix_res);
729		sc->nfe_msix_res = NULL;
730	}
731	if (sc->nfe_res[0] != NULL) {
732		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
733		    sc->nfe_res[0]);
734		sc->nfe_res[0] = NULL;
735	}
736
737	nfe_free_tx_ring(sc, &sc->txq);
738	nfe_free_rx_ring(sc, &sc->rxq);
739	nfe_free_jrx_ring(sc, &sc->jrxq);
740
741	if (sc->nfe_parent_tag) {
742		bus_dma_tag_destroy(sc->nfe_parent_tag);
743		sc->nfe_parent_tag = NULL;
744	}
745
746	mtx_destroy(&sc->nfe_mtx);
747
748	return (0);
749}
750
751
752static int
753nfe_suspend(device_t dev)
754{
755	struct nfe_softc *sc;
756
757	sc = device_get_softc(dev);
758
759	NFE_LOCK(sc);
760	nfe_stop(sc->nfe_ifp);
761	nfe_set_wol(sc);
762	sc->nfe_suspended = 1;
763	NFE_UNLOCK(sc);
764
765	return (0);
766}
767
768
769static int
770nfe_resume(device_t dev)
771{
772	struct nfe_softc *sc;
773	struct ifnet *ifp;
774
775	sc = device_get_softc(dev);
776
777	NFE_LOCK(sc);
778	nfe_power(sc);
779	ifp = sc->nfe_ifp;
780	if (ifp->if_flags & IFF_UP)
781		nfe_init_locked(sc);
782	sc->nfe_suspended = 0;
783	NFE_UNLOCK(sc);
784
785	return (0);
786}
787
788
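/*
 * Compare the SMBIOS base-board maker/product strings against a small
 * table of boards with known MSI/MSI-X problems; return 0 when this
 * system is on the list so the caller can fall back to INTx.
 */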
789static int
790nfe_can_use_msix(struct nfe_softc *sc)
791{
792	static struct msix_blacklist {
793		char	*maker;
794		char	*product;
795	} msix_blacklists[] = {
796		{ "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
797	};
798
799	struct msix_blacklist *mblp;
800	char *maker, *product;
801	int count, n, use_msix;
802
803	/*
804	 * Search the base-board manufacturer and product name table
805	 * to see whether this system has a known MSI/MSI-X issue.
806	 */
807	maker = getenv("smbios.planar.maker");
808	product = getenv("smbios.planar.product");
809	use_msix = 1;
810	if (maker != NULL && product != NULL) {
811		count = sizeof(msix_blacklists) / sizeof(msix_blacklists[0]);
812		mblp = msix_blacklists;
813		for (n = 0; n < count; n++) {
814			if (strcmp(maker, mblp->maker) == 0 &&
815			    strcmp(product, mblp->product) == 0) {
816				use_msix = 0;
817				break;
818			}
819			mblp++;
820		}
821	}
822	if (maker != NULL)
823		freeenv(maker);
824	if (product != NULL)
825		freeenv(product);
826
827	return (use_msix);
828}
829
830
831/* Take PHY/NIC out of powerdown, from Linux */
832static void
833nfe_power(struct nfe_softc *sc)
834{
835	uint32_t pwr;
836
837	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
838		return;
839	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
840	NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
841	DELAY(100);
842	NFE_WRITE(sc, NFE_MAC_RESET, 0);
843	DELAY(100);
844	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
845	pwr = NFE_READ(sc, NFE_PWR2_CTL);
846	pwr &= ~NFE_PWR2_WAKEUP_MASK;
847	if (sc->nfe_revid >= 0xa3 &&
848	    (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
849	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
850		pwr |= NFE_PWR2_REVA3;
851	NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
852}
853
854
855static void
856nfe_miibus_statchg(device_t dev)
857{
858	struct nfe_softc *sc;
859	struct mii_data *mii;
860	struct ifnet *ifp;
861	uint32_t rxctl, txctl;
862
863	sc = device_get_softc(dev);
864
865	mii = device_get_softc(sc->nfe_miibus);
866	ifp = sc->nfe_ifp;
867
868	sc->nfe_link = 0;
869	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
870	    (IFM_ACTIVE | IFM_AVALID)) {
871		switch (IFM_SUBTYPE(mii->mii_media_active)) {
872		case IFM_10_T:
873		case IFM_100_TX:
874		case IFM_1000_T:
875			sc->nfe_link = 1;
876			break;
877		default:
878			break;
879		}
880	}
881
882	nfe_mac_config(sc, mii);
883	txctl = NFE_READ(sc, NFE_TX_CTL);
884	rxctl = NFE_READ(sc, NFE_RX_CTL);
885	if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
886		txctl |= NFE_TX_START;
887		rxctl |= NFE_RX_START;
888	} else {
889		txctl &= ~NFE_TX_START;
890		rxctl &= ~NFE_RX_START;
891	}
892	NFE_WRITE(sc, NFE_TX_CTL, txctl);
893	NFE_WRITE(sc, NFE_RX_CTL, rxctl);
894}
895
896
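/*
 * Reprogram the MAC for the currently negotiated media: duplex- and
 * speed-dependent PHY interface, seed and link registers, plus the
 * Rx/Tx pause-frame configuration resolved by the PHY.
 */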
897static void
898nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
899{
900	uint32_t link, misc, phy, seed;
901	uint32_t val;
902
903	NFE_LOCK_ASSERT(sc);
904
905	phy = NFE_READ(sc, NFE_PHY_IFACE);
906	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
907
908	seed = NFE_READ(sc, NFE_RNDSEED);
909	seed &= ~NFE_SEED_MASK;
910
911	misc = NFE_MISC1_MAGIC;
912	link = NFE_MEDIA_SET;
913
914	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
915		phy  |= NFE_PHY_HDX;	/* half-duplex */
916		misc |= NFE_MISC1_HDX;
917	}
918
919	switch (IFM_SUBTYPE(mii->mii_media_active)) {
920	case IFM_1000_T:	/* full-duplex only */
921		link |= NFE_MEDIA_1000T;
922		seed |= NFE_SEED_1000T;
923		phy  |= NFE_PHY_1000T;
924		break;
925	case IFM_100_TX:
926		link |= NFE_MEDIA_100TX;
927		seed |= NFE_SEED_100TX;
928		phy  |= NFE_PHY_100TX;
929		break;
930	case IFM_10_T:
931		link |= NFE_MEDIA_10T;
932		seed |= NFE_SEED_10T;
933		break;
934	}
935
936	if ((phy & 0x10000000) != 0) {
937		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
938			val = NFE_R1_MAGIC_1000;
939		else
940			val = NFE_R1_MAGIC_10_100;
941	} else
942		val = NFE_R1_MAGIC_DEFAULT;
943	NFE_WRITE(sc, NFE_SETUP_R1, val);
944
945	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
946
947	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
948	NFE_WRITE(sc, NFE_MISC1, misc);
949	NFE_WRITE(sc, NFE_LINKSPEED, link);
950
951	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
952		/* It seems all hardware supports Rx pause frames. */
953		val = NFE_READ(sc, NFE_RXFILTER);
954		if ((IFM_OPTIONS(mii->mii_media_active) &
955		    IFM_ETH_RXPAUSE) != 0)
956			val |= NFE_PFF_RX_PAUSE;
957		else
958			val &= ~NFE_PFF_RX_PAUSE;
959		NFE_WRITE(sc, NFE_RXFILTER, val);
960		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
961			val = NFE_READ(sc, NFE_MISC1);
962			if ((IFM_OPTIONS(mii->mii_media_active) &
963			    IFM_ETH_TXPAUSE) != 0) {
964				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
965				    NFE_TX_PAUSE_FRAME_ENABLE);
966				val |= NFE_MISC1_TX_PAUSE;
967			} else {
968				val &= ~NFE_MISC1_TX_PAUSE;
969				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
970				    NFE_TX_PAUSE_FRAME_DISABLE);
971			}
972			NFE_WRITE(sc, NFE_MISC1, val);
973		}
974	} else {
975		/* disable rx/tx pause frames */
976		val = NFE_READ(sc, NFE_RXFILTER);
977		val &= ~NFE_PFF_RX_PAUSE;
978		NFE_WRITE(sc, NFE_RXFILTER, val);
979		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
980			NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
981			    NFE_TX_PAUSE_FRAME_DISABLE);
982			val = NFE_READ(sc, NFE_MISC1);
983			val &= ~NFE_MISC1_TX_PAUSE;
984			NFE_WRITE(sc, NFE_MISC1, val);
985		}
986	}
987}
988
989
990static int
991nfe_miibus_readreg(device_t dev, int phy, int reg)
992{
993	struct nfe_softc *sc = device_get_softc(dev);
994	uint32_t val;
995	int ntries;
996
997	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
998
999	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
1000		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1001		DELAY(100);
1002	}
1003
1004	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
1005
1006	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1007		DELAY(100);
1008		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1009			break;
1010	}
1011	if (ntries == NFE_TIMEOUT) {
1012		DPRINTFN(sc, 2, "timeout waiting for PHY\n");
1013		return 0;
1014	}
1015
1016	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
1017		DPRINTFN(sc, 2, "could not read PHY\n");
1018		return 0;
1019	}
1020
1021	val = NFE_READ(sc, NFE_PHY_DATA);
1022	if (val != 0xffffffff && val != 0)
1023		sc->mii_phyaddr = phy;
1024
1025	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
1026
1027	return (val);
1028}
1029
1030
1031static int
1032nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
1033{
1034	struct nfe_softc *sc = device_get_softc(dev);
1035	uint32_t ctl;
1036	int ntries;
1037
1038	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1039
1040	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
1041		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1042		DELAY(100);
1043	}
1044
1045	NFE_WRITE(sc, NFE_PHY_DATA, val);
1046	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
1047	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
1048
1049	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1050		DELAY(100);
1051		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1052			break;
1053	}
1054#ifdef NFE_DEBUG
1055	if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
1056		device_printf(sc->nfe_dev, "could not write to PHY\n");
1057#endif
1058	return (0);
1059}
1060
1061struct nfe_dmamap_arg {
1062	bus_addr_t nfe_busaddr;
1063};
1064
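/*
 * Allocate DMA resources for the standard Rx ring: one contiguous,
 * ring-aligned descriptor area plus a DMA map per mbuf cluster slot
 * and one spare map used when replacing buffers.
 */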
1065static int
1066nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1067{
1068	struct nfe_dmamap_arg ctx;
1069	struct nfe_rx_data *data;
1070	void *desc;
1071	int i, error, descsize;
1072
1073	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1074		desc = ring->desc64;
1075		descsize = sizeof (struct nfe_desc64);
1076	} else {
1077		desc = ring->desc32;
1078		descsize = sizeof (struct nfe_desc32);
1079	}
1080
1081	ring->cur = ring->next = 0;
1082
1083	error = bus_dma_tag_create(sc->nfe_parent_tag,
1084	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1085	    BUS_SPACE_MAXADDR,			/* lowaddr */
1086	    BUS_SPACE_MAXADDR,			/* highaddr */
1087	    NULL, NULL,				/* filter, filterarg */
1088	    NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
1089	    NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
1090	    0,					/* flags */
1091	    NULL, NULL,				/* lockfunc, lockarg */
1092	    &ring->rx_desc_tag);
1093	if (error != 0) {
1094		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1095		goto fail;
1096	}
1097
1098	/* allocate DMA'able memory for the descriptors */
1099	error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
1100	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
1101	if (error != 0) {
1102		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1103		goto fail;
1104	}
1105	if (sc->nfe_flags & NFE_40BIT_ADDR)
1106		ring->desc64 = desc;
1107	else
1108		ring->desc32 = desc;
1109
1110	/* map the descriptors into device-visible address space */
1111	ctx.nfe_busaddr = 0;
1112	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
1113	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1114	if (error != 0) {
1115		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1116		goto fail;
1117	}
1118	ring->physaddr = ctx.nfe_busaddr;
1119
1120	error = bus_dma_tag_create(sc->nfe_parent_tag,
1121	    1, 0,			/* alignment, boundary */
1122	    BUS_SPACE_MAXADDR,		/* lowaddr */
1123	    BUS_SPACE_MAXADDR,		/* highaddr */
1124	    NULL, NULL,			/* filter, filterarg */
1125	    MCLBYTES, 1,		/* maxsize, nsegments */
1126	    MCLBYTES,			/* maxsegsize */
1127	    0,				/* flags */
1128	    NULL, NULL,			/* lockfunc, lockarg */
1129	    &ring->rx_data_tag);
1130	if (error != 0) {
1131		device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
1132		goto fail;
1133	}
1134
1135	error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
1136	if (error != 0) {
1137		device_printf(sc->nfe_dev,
1138		    "could not create Rx DMA spare map\n");
1139		goto fail;
1140	}
1141
1142	/*
1143	 * Pre-allocate DMA maps for the Rx ring buffers.
1144	 */
1145	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1146		data = &sc->rxq.data[i];
1147		data->rx_data_map = NULL;
1148		data->m = NULL;
1149		error = bus_dmamap_create(ring->rx_data_tag, 0,
1150		    &data->rx_data_map);
1151		if (error != 0) {
1152			device_printf(sc->nfe_dev,
1153			    "could not create Rx DMA map\n");
1154			goto fail;
1155		}
1156	}
1157
1158fail:
1159	return (error);
1160}
1161
1162
1163static void
1164nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1165{
1166	struct nfe_dmamap_arg ctx;
1167	struct nfe_rx_data *data;
1168	void *desc;
1169	int i, error, descsize;
1170
1171	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1172		return;
1173	if (jumbo_disable != 0) {
1174		device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
1175		sc->nfe_jumbo_disable = 1;
1176		return;
1177	}
1178
1179	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1180		desc = ring->jdesc64;
1181		descsize = sizeof (struct nfe_desc64);
1182	} else {
1183		desc = ring->jdesc32;
1184		descsize = sizeof (struct nfe_desc32);
1185	}
1186
1187	ring->jcur = ring->jnext = 0;
1188
1189	/* Create DMA tag for jumbo Rx ring. */
1190	error = bus_dma_tag_create(sc->nfe_parent_tag,
1191	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1192	    BUS_SPACE_MAXADDR,			/* lowaddr */
1193	    BUS_SPACE_MAXADDR,			/* highaddr */
1194	    NULL, NULL,				/* filter, filterarg */
1195	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsize */
1196	    1, 					/* nsegments */
1197	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsegsize */
1198	    0,					/* flags */
1199	    NULL, NULL,				/* lockfunc, lockarg */
1200	    &ring->jrx_desc_tag);
1201	if (error != 0) {
1202		device_printf(sc->nfe_dev,
1203		    "could not create jumbo ring DMA tag\n");
1204		goto fail;
1205	}
1206
1207	/* Create DMA tag for jumbo Rx buffers. */
1208	error = bus_dma_tag_create(sc->nfe_parent_tag,
1209	    1, 0,				/* alignment, boundary */
1210	    BUS_SPACE_MAXADDR,			/* lowaddr */
1211	    BUS_SPACE_MAXADDR,			/* highaddr */
1212	    NULL, NULL,				/* filter, filterarg */
1213	    MJUM9BYTES,				/* maxsize */
1214	    1,					/* nsegments */
1215	    MJUM9BYTES,				/* maxsegsize */
1216	    0,					/* flags */
1217	    NULL, NULL,				/* lockfunc, lockarg */
1218	    &ring->jrx_data_tag);
1219	if (error != 0) {
1220		device_printf(sc->nfe_dev,
1221		    "could not create jumbo Rx buffer DMA tag\n");
1222		goto fail;
1223	}
1224
1225	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
1226	error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
1227	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
1228	if (error != 0) {
1229		device_printf(sc->nfe_dev,
1230		    "could not allocate DMA'able memory for jumbo Rx ring\n");
1231		goto fail;
1232	}
1233	if (sc->nfe_flags & NFE_40BIT_ADDR)
1234		ring->jdesc64 = desc;
1235	else
1236		ring->jdesc32 = desc;
1237
1238	ctx.nfe_busaddr = 0;
1239	error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
1240	    NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1241	if (error != 0) {
1242		device_printf(sc->nfe_dev,
1243		    "could not load DMA'able memory for jumbo Rx ring\n");
1244		goto fail;
1245	}
1246	ring->jphysaddr = ctx.nfe_busaddr;
1247
1248	/* Create DMA maps for jumbo Rx buffers. */
1249	error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
1250	if (error != 0) {
1251		device_printf(sc->nfe_dev,
1252		    "could not create jumbo Rx DMA spare map\n");
1253		goto fail;
1254	}
1255
1256	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1257		data = &sc->jrxq.jdata[i];
1258		data->rx_data_map = NULL;
1259		data->m = NULL;
1260		error = bus_dmamap_create(ring->jrx_data_tag, 0,
1261		    &data->rx_data_map);
1262		if (error != 0) {
1263			device_printf(sc->nfe_dev,
1264			    "could not create jumbo Rx DMA map\n");
1265			goto fail;
1266		}
1267	}
1268
1269	return;
1270
1271fail:
1272	/*
1273	 * Running without jumbo frame support is fine in most cases,
1274	 * so don't fail the attach if the jumbo DMA tag/map can't be created.
1275	 */
1276	nfe_free_jrx_ring(sc, ring);
1277	device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
1278	    "resource shortage\n");
1279	sc->nfe_jumbo_disable = 1;
1280}
1281
1282
1283static int
1284nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1285{
1286	void *desc;
1287	size_t descsize;
1288	int i;
1289
1290	ring->cur = ring->next = 0;
1291	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1292		desc = ring->desc64;
1293		descsize = sizeof (struct nfe_desc64);
1294	} else {
1295		desc = ring->desc32;
1296		descsize = sizeof (struct nfe_desc32);
1297	}
1298	bzero(desc, descsize * NFE_RX_RING_COUNT);
1299	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1300		if (nfe_newbuf(sc, i) != 0)
1301			return (ENOBUFS);
1302	}
1303
1304	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
1305	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1306
1307	return (0);
1308}
1309
1310
1311static int
1312nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1313{
1314	void *desc;
1315	size_t descsize;
1316	int i;
1317
1318	ring->jcur = ring->jnext = 0;
1319	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1320		desc = ring->jdesc64;
1321		descsize = sizeof (struct nfe_desc64);
1322	} else {
1323		desc = ring->jdesc32;
1324		descsize = sizeof (struct nfe_desc32);
1325	}
1326	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
1327	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1328		if (nfe_jnewbuf(sc, i) != 0)
1329			return (ENOBUFS);
1330	}
1331
1332	bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
1333	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1334
1335	return (0);
1336}
1337
1338
1339static void
1340nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1341{
1342	struct nfe_rx_data *data;
1343	void *desc;
1344	int i, descsize;
1345
1346	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1347		desc = ring->desc64;
1348		descsize = sizeof (struct nfe_desc64);
1349	} else {
1350		desc = ring->desc32;
1351		descsize = sizeof (struct nfe_desc32);
1352	}
1353
1354	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1355		data = &ring->data[i];
1356		if (data->rx_data_map != NULL) {
1357			bus_dmamap_destroy(ring->rx_data_tag,
1358			    data->rx_data_map);
1359			data->rx_data_map = NULL;
1360		}
1361		if (data->m != NULL) {
1362			m_freem(data->m);
1363			data->m = NULL;
1364		}
1365	}
1366	if (ring->rx_data_tag != NULL) {
1367		if (ring->rx_spare_map != NULL) {
1368			bus_dmamap_destroy(ring->rx_data_tag,
1369			    ring->rx_spare_map);
1370			ring->rx_spare_map = NULL;
1371		}
1372		bus_dma_tag_destroy(ring->rx_data_tag);
1373		ring->rx_data_tag = NULL;
1374	}
1375
1376	if (desc != NULL) {
1377		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
1378		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
1379		ring->desc64 = NULL;
1380		ring->desc32 = NULL;
1381		ring->rx_desc_map = NULL;
1382	}
1383	if (ring->rx_desc_tag != NULL) {
1384		bus_dma_tag_destroy(ring->rx_desc_tag);
1385		ring->rx_desc_tag = NULL;
1386	}
1387}
1388
1389
1390static void
1391nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1392{
1393	struct nfe_rx_data *data;
1394	void *desc;
1395	int i, descsize;
1396
1397	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1398		return;
1399
1400	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1401		desc = ring->jdesc64;
1402		descsize = sizeof (struct nfe_desc64);
1403	} else {
1404		desc = ring->jdesc32;
1405		descsize = sizeof (struct nfe_desc32);
1406	}
1407
1408	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1409		data = &ring->jdata[i];
1410		if (data->rx_data_map != NULL) {
1411			bus_dmamap_destroy(ring->jrx_data_tag,
1412			    data->rx_data_map);
1413			data->rx_data_map = NULL;
1414		}
1415		if (data->m != NULL) {
1416			m_freem(data->m);
1417			data->m = NULL;
1418		}
1419	}
1420	if (ring->jrx_data_tag != NULL) {
1421		if (ring->jrx_spare_map != NULL) {
1422			bus_dmamap_destroy(ring->jrx_data_tag,
1423			    ring->jrx_spare_map);
1424			ring->jrx_spare_map = NULL;
1425		}
1426		bus_dma_tag_destroy(ring->jrx_data_tag);
1427		ring->jrx_data_tag = NULL;
1428	}
1429
1430	if (desc != NULL) {
1431		bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
1432		bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
1433		ring->jdesc64 = NULL;
1434		ring->jdesc32 = NULL;
1435		ring->jrx_desc_map = NULL;
1436	}
1437
1438	if (ring->jrx_desc_tag != NULL) {
1439		bus_dma_tag_destroy(ring->jrx_desc_tag);
1440		ring->jrx_desc_tag = NULL;
1441	}
1442}
1443
1444
1445static int
1446nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1447{
1448	struct nfe_dmamap_arg ctx;
1449	int i, error;
1450	void *desc;
1451	int descsize;
1452
1453	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1454		desc = ring->desc64;
1455		descsize = sizeof (struct nfe_desc64);
1456	} else {
1457		desc = ring->desc32;
1458		descsize = sizeof (struct nfe_desc32);
1459	}
1460
1461	ring->queued = 0;
1462	ring->cur = ring->next = 0;
1463
1464	error = bus_dma_tag_create(sc->nfe_parent_tag,
1465	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1466	    BUS_SPACE_MAXADDR,			/* lowaddr */
1467	    BUS_SPACE_MAXADDR,			/* highaddr */
1468	    NULL, NULL,				/* filter, filterarg */
1469	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
1470	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
1471	    0,					/* flags */
1472	    NULL, NULL,				/* lockfunc, lockarg */
1473	    &ring->tx_desc_tag);
1474	if (error != 0) {
1475		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1476		goto fail;
1477	}
1478
1479	error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
1480	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
1481	if (error != 0) {
1482		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1483		goto fail;
1484	}
1485	if (sc->nfe_flags & NFE_40BIT_ADDR)
1486		ring->desc64 = desc;
1487	else
1488		ring->desc32 = desc;
1489
1490	ctx.nfe_busaddr = 0;
1491	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
1492	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1493	if (error != 0) {
1494		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1495		goto fail;
1496	}
1497	ring->physaddr = ctx.nfe_busaddr;
1498
1499	error = bus_dma_tag_create(sc->nfe_parent_tag,
1500	    1, 0,
1501	    BUS_SPACE_MAXADDR,
1502	    BUS_SPACE_MAXADDR,
1503	    NULL, NULL,
1504	    NFE_TSO_MAXSIZE,
1505	    NFE_MAX_SCATTER,
1506	    NFE_TSO_MAXSGSIZE,
1507	    0,
1508	    NULL, NULL,
1509	    &ring->tx_data_tag);
1510	if (error != 0) {
1511		device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
1512		goto fail;
1513	}
1514
1515	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1516		error = bus_dmamap_create(ring->tx_data_tag, 0,
1517		    &ring->data[i].tx_data_map);
1518		if (error != 0) {
1519			device_printf(sc->nfe_dev,
1520			    "could not create Tx DMA map\n");
1521			goto fail;
1522		}
1523	}
1524
1525fail:
1526	return (error);
1527}
1528
1529
1530static void
1531nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1532{
1533	void *desc;
1534	size_t descsize;
1535
1536	sc->nfe_force_tx = 0;
1537	ring->queued = 0;
1538	ring->cur = ring->next = 0;
1539	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1540		desc = ring->desc64;
1541		descsize = sizeof (struct nfe_desc64);
1542	} else {
1543		desc = ring->desc32;
1544		descsize = sizeof (struct nfe_desc32);
1545	}
1546	bzero(desc, descsize * NFE_TX_RING_COUNT);
1547
1548	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1549	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1550}
1551
1552
1553static void
1554nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1555{
1556	struct nfe_tx_data *data;
1557	void *desc;
1558	int i, descsize;
1559
1560	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1561		desc = ring->desc64;
1562		descsize = sizeof (struct nfe_desc64);
1563	} else {
1564		desc = ring->desc32;
1565		descsize = sizeof (struct nfe_desc32);
1566	}
1567
1568	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1569		data = &ring->data[i];
1570
1571		if (data->m != NULL) {
1572			bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
1573			    BUS_DMASYNC_POSTWRITE);
1574			bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
1575			m_freem(data->m);
1576			data->m = NULL;
1577		}
1578		if (data->tx_data_map != NULL) {
1579			bus_dmamap_destroy(ring->tx_data_tag,
1580			    data->tx_data_map);
1581			data->tx_data_map = NULL;
1582		}
1583	}
1584
1585	if (ring->tx_data_tag != NULL) {
1586		bus_dma_tag_destroy(ring->tx_data_tag);
1587		ring->tx_data_tag = NULL;
1588	}
1589
1590	if (desc != NULL) {
1591		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1592		    BUS_DMASYNC_POSTWRITE);
1593		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1594		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1595		ring->desc64 = NULL;
1596		ring->desc32 = NULL;
1597		ring->tx_desc_map = NULL;
1598		bus_dma_tag_destroy(ring->tx_desc_tag);
1599		ring->tx_desc_tag = NULL;
1600	}
1601}
1602
1603#ifdef DEVICE_POLLING
1604static poll_handler_t nfe_poll;
1605
1606
1607static int
1608nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1609{
1610	struct nfe_softc *sc = ifp->if_softc;
1611	uint32_t r;
1612	int rx_npkts = 0;
1613
1614	NFE_LOCK(sc);
1615
1616	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1617		NFE_UNLOCK(sc);
1618		return (rx_npkts);
1619	}
1620
1621	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1622		rx_npkts = nfe_jrxeof(sc, count, &rx_npkts);
1623	else
1624		rx_npkts = nfe_rxeof(sc, count, &rx_npkts);
1625	nfe_txeof(sc);
1626	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1627		nfe_start_locked(ifp);
1628
1629	if (cmd == POLL_AND_CHECK_STATUS) {
1630		if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1631			NFE_UNLOCK(sc);
1632			return (rx_npkts);
1633		}
1634		NFE_WRITE(sc, sc->nfe_irq_status, r);
1635
1636		if (r & NFE_IRQ_LINK) {
1637			NFE_READ(sc, NFE_PHY_STATUS);
1638			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1639			DPRINTF(sc, "link state changed\n");
1640		}
1641	}
1642	NFE_UNLOCK(sc);
1643	return (rx_npkts);
1644}
1645#endif /* DEVICE_POLLING */
1646
1647static void
1648nfe_set_intr(struct nfe_softc *sc)
1649{
1650
1651	if (sc->nfe_msi != 0)
1652		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1653}
1654
1655
1656/* In MSI-X, a write to the mask registers behaves as an XOR. */
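/*
 * Writing bits that are already set therefore clears them, which is why
 * the helpers below read the mask back before deciding whether to write.
 */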
1657static __inline void
1658nfe_enable_intr(struct nfe_softc *sc)
1659{
1660
1661	if (sc->nfe_msix != 0) {
1662		/* XXX Should have a better way to enable interrupts! */
1663		if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
1664			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1665	} else
1666		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1667}
1668
1669
1670static __inline void
1671nfe_disable_intr(struct nfe_softc *sc)
1672{
1673
1674	if (sc->nfe_msix != 0) {
1675		/* XXX Should have a better way to disable interrupts! */
1676		if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
1677			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1678	} else
1679		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1680}
1681
1682
1683static int
1684nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1685{
1686	struct nfe_softc *sc;
1687	struct ifreq *ifr;
1688	struct mii_data *mii;
1689	int error, init, mask;
1690
1691	sc = ifp->if_softc;
1692	ifr = (struct ifreq *) data;
1693	error = 0;
1694	init = 0;
1695	switch (cmd) {
1696	case SIOCSIFMTU:
1697		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
1698			error = EINVAL;
1699		else if (ifp->if_mtu != ifr->ifr_mtu) {
1700			if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
1701			    (sc->nfe_jumbo_disable != 0)) &&
1702			    ifr->ifr_mtu > ETHERMTU)
1703				error = EINVAL;
1704			else {
1705				NFE_LOCK(sc);
1706				ifp->if_mtu = ifr->ifr_mtu;
1707				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1708					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1709					nfe_init_locked(sc);
1710				}
1711				NFE_UNLOCK(sc);
1712			}
1713		}
1714		break;
1715	case SIOCSIFFLAGS:
1716		NFE_LOCK(sc);
1717		if (ifp->if_flags & IFF_UP) {
1718			/*
1719			 * If only the PROMISC or ALLMULTI flag changes, then
1720			 * don't do a full re-init of the chip, just update
1721			 * the Rx filter.
1722			 */
1723			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1724			    ((ifp->if_flags ^ sc->nfe_if_flags) &
1725			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1726				nfe_setmulti(sc);
1727			else
1728				nfe_init_locked(sc);
1729		} else {
1730			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1731				nfe_stop(ifp);
1732		}
1733		sc->nfe_if_flags = ifp->if_flags;
1734		NFE_UNLOCK(sc);
1735		error = 0;
1736		break;
1737	case SIOCADDMULTI:
1738	case SIOCDELMULTI:
1739		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1740			NFE_LOCK(sc);
1741			nfe_setmulti(sc);
1742			NFE_UNLOCK(sc);
1743			error = 0;
1744		}
1745		break;
1746	case SIOCSIFMEDIA:
1747	case SIOCGIFMEDIA:
1748		mii = device_get_softc(sc->nfe_miibus);
1749		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1750		break;
1751	case SIOCSIFCAP:
1752		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1753#ifdef DEVICE_POLLING
1754		if ((mask & IFCAP_POLLING) != 0) {
1755			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1756				error = ether_poll_register(nfe_poll, ifp);
1757				if (error)
1758					break;
1759				NFE_LOCK(sc);
1760				nfe_disable_intr(sc);
1761				ifp->if_capenable |= IFCAP_POLLING;
1762				NFE_UNLOCK(sc);
1763			} else {
1764				error = ether_poll_deregister(ifp);
1765				/* Enable interrupt even in error case */
1766				NFE_LOCK(sc);
1767				nfe_enable_intr(sc);
1768				ifp->if_capenable &= ~IFCAP_POLLING;
1769				NFE_UNLOCK(sc);
1770			}
1771		}
1772#endif /* DEVICE_POLLING */
1773		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1774		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
1775			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1776		if ((mask & IFCAP_TXCSUM) != 0 &&
1777		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1778			ifp->if_capenable ^= IFCAP_TXCSUM;
1779			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1780				ifp->if_hwassist |= NFE_CSUM_FEATURES;
1781			else
1782				ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
1783		}
1784		if ((mask & IFCAP_RXCSUM) != 0 &&
1785		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
1786			ifp->if_capenable ^= IFCAP_RXCSUM;
1787			init++;
1788		}
1789		if ((mask & IFCAP_TSO4) != 0 &&
1790		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
1791			ifp->if_capenable ^= IFCAP_TSO4;
1792			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
1793				ifp->if_hwassist |= CSUM_TSO;
1794			else
1795				ifp->if_hwassist &= ~CSUM_TSO;
1796		}
1797		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
1798		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
1799			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1800		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1801		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
1802			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1803			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
1804				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
1805			init++;
1806		}
1807		/*
1808		 * XXX
1809		 * It seems that VLAN stripping requires Rx checksum offload.
1810		 * Unfortunately FreeBSD has no way to disable only Rx side
1811		 * VLAN stripping. So when we know Rx checksum offload is
1812		 * disabled, turn the entire hardware VLAN assist off.
1813		 */
1814		if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) {
1815			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
1816				init++;
1817			ifp->if_capenable &= ~(IFCAP_VLAN_HWTAGGING |
1818			    IFCAP_VLAN_HWTSO);
1819		}
1820		if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1821			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1822			nfe_init(sc);
1823		}
1824		VLAN_CAPABILITIES(ifp);
1825		break;
1826	default:
1827		error = ether_ioctl(ifp, cmd, data);
1828		break;
1829	}
1830
1831	return (error);
1832}
1833
1834
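/*
 * Interrupt filter: runs in primary interrupt context, so it only
 * checks that the interrupt is ours, masks further interrupts and
 * defers the real work to the fast taskqueue.
 */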
1835static int
1836nfe_intr(void *arg)
1837{
1838	struct nfe_softc *sc;
1839	uint32_t status;
1840
1841	sc = (struct nfe_softc *)arg;
1842
1843	status = NFE_READ(sc, sc->nfe_irq_status);
1844	if (status == 0 || status == 0xffffffff)
1845		return (FILTER_STRAY);
1846	nfe_disable_intr(sc);
1847	taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1848
1849	return (FILTER_HANDLED);
1850}
1851
1852
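/*
 * Deferred interrupt handler: acknowledges the interrupt status,
 * handles link changes, services the Rx and Tx rings and either
 * re-enables interrupts or re-queues itself if work remains.
 */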
1853static void
1854nfe_int_task(void *arg, int pending)
1855{
1856	struct nfe_softc *sc = arg;
1857	struct ifnet *ifp = sc->nfe_ifp;
1858	uint32_t r;
1859	int domore;
1860
1861	NFE_LOCK(sc);
1862
1863	if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1864		nfe_enable_intr(sc);
1865		NFE_UNLOCK(sc);
1866		return;	/* not for us */
1867	}
1868	NFE_WRITE(sc, sc->nfe_irq_status, r);
1869
1870	DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
1871
1872#ifdef DEVICE_POLLING
1873	if (ifp->if_capenable & IFCAP_POLLING) {
1874		NFE_UNLOCK(sc);
1875		return;
1876	}
1877#endif
1878
1879	if (r & NFE_IRQ_LINK) {
1880		NFE_READ(sc, NFE_PHY_STATUS);
1881		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1882		DPRINTF(sc, "link state changed\n");
1883	}
1884
1885	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1886		NFE_UNLOCK(sc);
1887		nfe_disable_intr(sc);
1888		return;
1889	}
1890
1891	domore = 0;
1892	/* check Rx ring */
1893	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1894		domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
1895	else
1896		domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
1897	/* check Tx ring */
1898	nfe_txeof(sc);
1899
1900	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1901		nfe_start_locked(ifp);
1902
1903	NFE_UNLOCK(sc);
1904
1905	if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1906		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1907		return;
1908	}
1909
1910	/* Reenable interrupts. */
1911	nfe_enable_intr(sc);
1912}
1913
1914
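/*
 * Return an Rx descriptor to the hardware without replacing its mbuf,
 * used when a frame is dropped because of an error or an allocation
 * failure.  The 64-bit descriptor's physical address is rewritten because
 * the chip reuses that field to report the VLAN tag.
 */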
1915static __inline void
1916nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1917{
1918	struct nfe_desc32 *desc32;
1919	struct nfe_desc64 *desc64;
1920	struct nfe_rx_data *data;
1921	struct mbuf *m;
1922
1923	data = &sc->rxq.data[idx];
1924	m = data->m;
1925
1926	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1927		desc64 = &sc->rxq.desc64[idx];
1928		/* VLAN packet may have overwritten it. */
1929		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1930		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1931		desc64->length = htole16(m->m_len);
1932		desc64->flags = htole16(NFE_RX_READY);
1933	} else {
1934		desc32 = &sc->rxq.desc32[idx];
1935		desc32->length = htole16(m->m_len);
1936		desc32->flags = htole16(NFE_RX_READY);
1937	}
1938}
1939
1940
1941static __inline void
1942nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1943{
1944	struct nfe_desc32 *desc32;
1945	struct nfe_desc64 *desc64;
1946	struct nfe_rx_data *data;
1947	struct mbuf *m;
1948
1949	data = &sc->jrxq.jdata[idx];
1950	m = data->m;
1951
1952	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1953		desc64 = &sc->jrxq.jdesc64[idx];
1954		/* VLAN packet may have overwritten it. */
1955		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1956		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1957		desc64->length = htole16(m->m_len);
1958		desc64->flags = htole16(NFE_RX_READY);
1959	} else {
1960		desc32 = &sc->jrxq.jdesc32[idx];
1961		desc32->length = htole16(m->m_len);
1962		desc32->flags = htole16(NFE_RX_READY);
1963	}
1964}
1965
1966
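/*
 * Allocate and DMA-map a fresh Rx mbuf for the given ring slot.  The new
 * mbuf is loaded into the spare map first so that, on failure, the slot
 * keeps its old buffer; on success the spare map and the slot's map are
 * swapped and the descriptor is rewritten with the new physical address.
 */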
1967static int
1968nfe_newbuf(struct nfe_softc *sc, int idx)
1969{
1970	struct nfe_rx_data *data;
1971	struct nfe_desc32 *desc32;
1972	struct nfe_desc64 *desc64;
1973	struct mbuf *m;
1974	bus_dma_segment_t segs[1];
1975	bus_dmamap_t map;
1976	int nsegs;
1977
1978	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1979	if (m == NULL)
1980		return (ENOBUFS);
1981
1982	m->m_len = m->m_pkthdr.len = MCLBYTES;
1983	m_adj(m, ETHER_ALIGN);
1984
1985	if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
1986	    m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1987		m_freem(m);
1988		return (ENOBUFS);
1989	}
1990	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1991
1992	data = &sc->rxq.data[idx];
1993	if (data->m != NULL) {
1994		bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1995		    BUS_DMASYNC_POSTREAD);
1996		bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
1997	}
1998	map = data->rx_data_map;
1999	data->rx_data_map = sc->rxq.rx_spare_map;
2000	sc->rxq.rx_spare_map = map;
2001	bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
2002	    BUS_DMASYNC_PREREAD);
2003	data->paddr = segs[0].ds_addr;
2004	data->m = m;
2005	/* update mapping address in h/w descriptor */
2006	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2007		desc64 = &sc->rxq.desc64[idx];
2008		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2009		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2010		desc64->length = htole16(segs[0].ds_len);
2011		desc64->flags = htole16(NFE_RX_READY);
2012	} else {
2013		desc32 = &sc->rxq.desc32[idx];
2014		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2015		desc32->length = htole16(segs[0].ds_len);
2016		desc32->flags = htole16(NFE_RX_READY);
2017	}
2018
2019	return (0);
2020}
2021
2022
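/*
 * Jumbo-frame variant of nfe_newbuf(): same spare-map scheme, but backed
 * by a 9k cluster on the jumbo Rx ring.
 */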
2023static int
2024nfe_jnewbuf(struct nfe_softc *sc, int idx)
2025{
2026	struct nfe_rx_data *data;
2027	struct nfe_desc32 *desc32;
2028	struct nfe_desc64 *desc64;
2029	struct mbuf *m;
2030	bus_dma_segment_t segs[1];
2031	bus_dmamap_t map;
2032	int nsegs;
2033
2034	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
2035	if (m == NULL)
2036		return (ENOBUFS);
2037	if ((m->m_flags & M_EXT) == 0) {
2038		m_freem(m);
2039		return (ENOBUFS);
2040	}
2041	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
2042	m_adj(m, ETHER_ALIGN);
2043
2044	if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
2045	    sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
2046		m_freem(m);
2047		return (ENOBUFS);
2048	}
2049	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2050
2051	data = &sc->jrxq.jdata[idx];
2052	if (data->m != NULL) {
2053		bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2054		    BUS_DMASYNC_POSTREAD);
2055		bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
2056	}
2057	map = data->rx_data_map;
2058	data->rx_data_map = sc->jrxq.jrx_spare_map;
2059	sc->jrxq.jrx_spare_map = map;
2060	bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2061	    BUS_DMASYNC_PREREAD);
2062	data->paddr = segs[0].ds_addr;
2063	data->m = m;
2064	/* update mapping address in h/w descriptor */
2065	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2066		desc64 = &sc->jrxq.jdesc64[idx];
2067		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2068		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2069		desc64->length = htole16(segs[0].ds_len);
2070		desc64->flags = htole16(NFE_RX_READY);
2071	} else {
2072		desc32 = &sc->jrxq.jdesc32[idx];
2073		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2074		desc32->length = htole16(segs[0].ds_len);
2075		desc32->flags = htole16(NFE_RX_READY);
2076	}
2077
2078	return (0);
2079}
2080
2081
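/*
 * Rx completion: walk the ring from the current index, validate each
 * completed descriptor, replace its mbuf via nfe_newbuf() and pass the
 * received frame up the stack with checksum and VLAN information filled
 * in.  Processing stops after 'count' descriptors; EAGAIN is returned if
 * the limit was hit so the caller can reschedule.
 */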
2082static int
2083nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2084{
2085	struct ifnet *ifp = sc->nfe_ifp;
2086	struct nfe_desc32 *desc32;
2087	struct nfe_desc64 *desc64;
2088	struct nfe_rx_data *data;
2089	struct mbuf *m;
2090	uint16_t flags;
2091	int len, prog, rx_npkts;
2092	uint32_t vtag = 0;
2093
2094	rx_npkts = 0;
2095	NFE_LOCK_ASSERT(sc);
2096
2097	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2098	    BUS_DMASYNC_POSTREAD);
2099
2100	for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2101		if (count <= 0)
2102			break;
2103		count--;
2104
2105		data = &sc->rxq.data[sc->rxq.cur];
2106
2107		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2108			desc64 = &sc->rxq.desc64[sc->rxq.cur];
2109			vtag = le32toh(desc64->physaddr[1]);
2110			flags = le16toh(desc64->flags);
2111			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2112		} else {
2113			desc32 = &sc->rxq.desc32[sc->rxq.cur];
2114			flags = le16toh(desc32->flags);
2115			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2116		}
2117
2118		if (flags & NFE_RX_READY)
2119			break;
2120		prog++;
2121		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2122			if (!(flags & NFE_RX_VALID_V1)) {
2123				ifp->if_ierrors++;
2124				nfe_discard_rxbuf(sc, sc->rxq.cur);
2125				continue;
2126			}
2127			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2128				flags &= ~NFE_RX_ERROR;
2129				len--;	/* fix buffer length */
2130			}
2131		} else {
2132			if (!(flags & NFE_RX_VALID_V2)) {
2133				ifp->if_ierrors++;
2134				nfe_discard_rxbuf(sc, sc->rxq.cur);
2135				continue;
2136			}
2137
2138			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2139				flags &= ~NFE_RX_ERROR;
2140				len--;	/* fix buffer length */
2141			}
2142		}
2143
2144		if (flags & NFE_RX_ERROR) {
2145			ifp->if_ierrors++;
2146			nfe_discard_rxbuf(sc, sc->rxq.cur);
2147			continue;
2148		}
2149
2150		m = data->m;
2151		if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2152			ifp->if_iqdrops++;
2153			nfe_discard_rxbuf(sc, sc->rxq.cur);
2154			continue;
2155		}
2156
2157		if ((vtag & NFE_RX_VTAG) != 0 &&
2158		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2159			m->m_pkthdr.ether_vtag = vtag & 0xffff;
2160			m->m_flags |= M_VLANTAG;
2161		}
2162
2163		m->m_pkthdr.len = m->m_len = len;
2164		m->m_pkthdr.rcvif = ifp;
2165
2166		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2167			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2168				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2169				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2170				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2171				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
2172					m->m_pkthdr.csum_flags |=
2173					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2174					m->m_pkthdr.csum_data = 0xffff;
2175				}
2176			}
2177		}
2178
2179		ifp->if_ipackets++;
2180
2181		NFE_UNLOCK(sc);
2182		(*ifp->if_input)(ifp, m);
2183		NFE_LOCK(sc);
2184		rx_npkts++;
2185	}
2186
2187	if (prog > 0)
2188		bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2189		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2190
2191	if (rx_npktsp != NULL)
2192		*rx_npktsp = rx_npkts;
2193	return (count > 0 ? 0 : EAGAIN);
2194}
2195
2196
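/*
 * Jumbo-frame variant of nfe_rxeof(), operating on the jumbo Rx ring.
 */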
2197static int
2198nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2199{
2200	struct ifnet *ifp = sc->nfe_ifp;
2201	struct nfe_desc32 *desc32;
2202	struct nfe_desc64 *desc64;
2203	struct nfe_rx_data *data;
2204	struct mbuf *m;
2205	uint16_t flags;
2206	int len, prog, rx_npkts;
2207	uint32_t vtag = 0;
2208
2209	rx_npkts = 0;
2210	NFE_LOCK_ASSERT(sc);
2211
2212	bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2213	    BUS_DMASYNC_POSTREAD);
2214
2215	for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2216	    vtag = 0) {
2217		if (count <= 0)
2218			break;
2219		count--;
2220
2221		data = &sc->jrxq.jdata[sc->jrxq.jcur];
2222
2223		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2224			desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2225			vtag = le32toh(desc64->physaddr[1]);
2226			flags = le16toh(desc64->flags);
2227			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2228		} else {
2229			desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2230			flags = le16toh(desc32->flags);
2231			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2232		}
2233
2234		if (flags & NFE_RX_READY)
2235			break;
2236		prog++;
2237		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2238			if (!(flags & NFE_RX_VALID_V1)) {
2239				ifp->if_ierrors++;
2240				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2241				continue;
2242			}
2243			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2244				flags &= ~NFE_RX_ERROR;
2245				len--;	/* fix buffer length */
2246			}
2247		} else {
2248			if (!(flags & NFE_RX_VALID_V2)) {
2249				ifp->if_ierrors++;
2250				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2251				continue;
2252			}
2253
2254			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2255				flags &= ~NFE_RX_ERROR;
2256				len--;	/* fix buffer length */
2257			}
2258		}
2259
2260		if (flags & NFE_RX_ERROR) {
2261			ifp->if_ierrors++;
2262			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2263			continue;
2264		}
2265
2266		m = data->m;
2267		if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2268			ifp->if_iqdrops++;
2269			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2270			continue;
2271		}
2272
2273		if ((vtag & NFE_RX_VTAG) != 0 &&
2274		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2275			m->m_pkthdr.ether_vtag = vtag & 0xffff;
2276			m->m_flags |= M_VLANTAG;
2277		}
2278
2279		m->m_pkthdr.len = m->m_len = len;
2280		m->m_pkthdr.rcvif = ifp;
2281
2282		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2283			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2284				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2285				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2286				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2287				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
2288					m->m_pkthdr.csum_flags |=
2289					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2290					m->m_pkthdr.csum_data = 0xffff;
2291				}
2292			}
2293		}
2294
2295		ifp->if_ipackets++;
2296
2297		NFE_UNLOCK(sc);
2298		(*ifp->if_input)(ifp, m);
2299		NFE_LOCK(sc);
2300		rx_npkts++;
2301	}
2302
2303	if (prog > 0)
2304		bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2305		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2306
2307	if (rx_npktsp != NULL)
2308		*rx_npktsp = rx_npkts;
2309	return (count > 0 ? 0 : EAGAIN);
2310}
2311
2312
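/*
 * Tx completion: reclaim descriptors the chip has finished with, count
 * errors, unload and free the transmitted mbufs, and clear the watchdog
 * timer once the ring is empty.
 */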
2313static void
2314nfe_txeof(struct nfe_softc *sc)
2315{
2316	struct ifnet *ifp = sc->nfe_ifp;
2317	struct nfe_desc32 *desc32;
2318	struct nfe_desc64 *desc64;
2319	struct nfe_tx_data *data = NULL;
2320	uint16_t flags;
2321	int cons, prog;
2322
2323	NFE_LOCK_ASSERT(sc);
2324
2325	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2326	    BUS_DMASYNC_POSTREAD);
2327
2328	prog = 0;
2329	for (cons = sc->txq.next; cons != sc->txq.cur;
2330	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
2331		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2332			desc64 = &sc->txq.desc64[cons];
2333			flags = le16toh(desc64->flags);
2334		} else {
2335			desc32 = &sc->txq.desc32[cons];
2336			flags = le16toh(desc32->flags);
2337		}
2338
2339		if (flags & NFE_TX_VALID)
2340			break;
2341
2342		prog++;
2343		sc->txq.queued--;
2344		data = &sc->txq.data[cons];
2345
2346		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2347			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2348				continue;
2349			if ((flags & NFE_TX_ERROR_V1) != 0) {
2350				device_printf(sc->nfe_dev,
2351				    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2352
2353				ifp->if_oerrors++;
2354			} else
2355				ifp->if_opackets++;
2356		} else {
2357			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2358				continue;
2359			if ((flags & NFE_TX_ERROR_V2) != 0) {
2360				device_printf(sc->nfe_dev,
2361				    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2362				ifp->if_oerrors++;
2363			} else
2364				ifp->if_opackets++;
2365		}
2366
2367		/* last fragment of the mbuf chain transmitted */
2368		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2369		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2370		    BUS_DMASYNC_POSTWRITE);
2371		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2372		m_freem(data->m);
2373		data->m = NULL;
2374	}
2375
2376	if (prog > 0) {
2377		sc->nfe_force_tx = 0;
2378		sc->txq.next = cons;
2379		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2380		if (sc->txq.queued == 0)
2381			sc->nfe_watchdog_timer = 0;
2382	}
2383}
2384
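/*
 * Encapsulate an mbuf chain into the Tx ring.  The chain is DMA mapped
 * (and collapsed once if it has too many segments), one descriptor is
 * filled per segment, and the checksum/TSO/VLAN bits are applied to the
 * first descriptor only.  The valid bit of the first descriptor is set
 * last so the chip never sees a partially built chain.
 */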
2385static int
2386nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2387{
2388	struct nfe_desc32 *desc32 = NULL;
2389	struct nfe_desc64 *desc64 = NULL;
2390	bus_dmamap_t map;
2391	bus_dma_segment_t segs[NFE_MAX_SCATTER];
2392	int error, i, nsegs, prod, si;
2393	uint32_t tso_segsz;
2394	uint16_t cflags, flags;
2395	struct mbuf *m;
2396
2397	prod = si = sc->txq.cur;
2398	map = sc->txq.data[prod].tx_data_map;
2399
2400	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2401	    &nsegs, BUS_DMA_NOWAIT);
2402	if (error == EFBIG) {
2403		m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER);
2404		if (m == NULL) {
2405			m_freem(*m_head);
2406			*m_head = NULL;
2407			return (ENOBUFS);
2408		}
2409		*m_head = m;
2410		error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2411		    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2412		if (error != 0) {
2413			m_freem(*m_head);
2414			*m_head = NULL;
2415			return (ENOBUFS);
2416		}
2417	} else if (error != 0)
2418		return (error);
2419	if (nsegs == 0) {
2420		m_freem(*m_head);
2421		*m_head = NULL;
2422		return (EIO);
2423	}
2424
2425	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2426		bus_dmamap_unload(sc->txq.tx_data_tag, map);
2427		return (ENOBUFS);
2428	}
2429
2430	m = *m_head;
2431	cflags = flags = 0;
2432	tso_segsz = 0;
2433	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2434		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2435		    NFE_TX_TSO_SHIFT;
2436		cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2437		cflags |= NFE_TX_TSO;
2438	} else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2439		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2440			cflags |= NFE_TX_IP_CSUM;
2441		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2442			cflags |= NFE_TX_TCP_UDP_CSUM;
2443		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2444			cflags |= NFE_TX_TCP_UDP_CSUM;
2445	}
2446
2447	for (i = 0; i < nsegs; i++) {
2448		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2449			desc64 = &sc->txq.desc64[prod];
2450			desc64->physaddr[0] =
2451			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
2452			desc64->physaddr[1] =
2453			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
2454			desc64->vtag = 0;
2455			desc64->length = htole16(segs[i].ds_len - 1);
2456			desc64->flags = htole16(flags);
2457		} else {
2458			desc32 = &sc->txq.desc32[prod];
2459			desc32->physaddr =
2460			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
2461			desc32->length = htole16(segs[i].ds_len - 1);
2462			desc32->flags = htole16(flags);
2463		}
2464
2465		/*
2466		 * Setting of the valid bit in the first descriptor is
2467		 * deferred until the whole chain is fully setup.
2468		 */
2469		flags |= NFE_TX_VALID;
2470
2471		sc->txq.queued++;
2472		NFE_INC(prod, NFE_TX_RING_COUNT);
2473	}
2474
2475	/*
2476	 * The whole mbuf chain has been DMA mapped; fix the last/first descriptor.
2477	 * Csum flags, vtag and TSO belong to the first fragment only.
2478	 */
2479	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2480		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2481		desc64 = &sc->txq.desc64[si];
2482		if ((m->m_flags & M_VLANTAG) != 0)
2483			desc64->vtag = htole32(NFE_TX_VTAG |
2484			    m->m_pkthdr.ether_vtag);
2485		if (tso_segsz != 0) {
2486			/*
2487			 * XXX
2488			 * The following indicates the descriptor element
2489			 * is a 32bit quantity.
2490			 */
2491			desc64->length |= htole16((uint16_t)tso_segsz);
2492			desc64->flags |= htole16(tso_segsz >> 16);
2493		}
2494		/*
2495		 * finally, set the valid/checksum/TSO bit in the first
2496		 * descriptor.
2497		 */
2498		desc64->flags |= htole16(NFE_TX_VALID | cflags);
2499	} else {
2500		if (sc->nfe_flags & NFE_JUMBO_SUP)
2501			desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2502		else
2503			desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2504		desc32 = &sc->txq.desc32[si];
2505		if (tso_segsz != 0) {
2506			/*
2507			 * XXX
2508			 * The following indicates the descriptor element
2509			 * is a 32bit quantity.
2510			 */
2511			desc32->length |= htole16((uint16_t)tso_segsz);
2512			desc32->flags |= htole16(tso_segsz >> 16);
2513		}
2514		/*
2515		 * finally, set the valid/checksum/TSO bit in the first
2516		 * descriptor.
2517		 */
2518		desc32->flags |= htole16(NFE_TX_VALID | cflags);
2519	}
2520
2521	sc->txq.cur = prod;
2522	prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2523	sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2524	sc->txq.data[prod].tx_data_map = map;
2525	sc->txq.data[prod].m = m;
2526
2527	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
2528
2529	return (0);
2530}
2531
2532
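/*
 * Program the hardware multicast filter.  The filter is an address/mask
 * pair: 'addr' keeps the bits common to every enabled multicast address
 * and 'mask' marks the bit positions on which they all agree, which lets
 * the MAC match the whole set with one masked compare.  In promiscuous
 * or allmulti mode both are cleared so every address matches.
 */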
2533static void
2534nfe_setmulti(struct nfe_softc *sc)
2535{
2536	struct ifnet *ifp = sc->nfe_ifp;
2537	struct ifmultiaddr *ifma;
2538	int i;
2539	uint32_t filter;
2540	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
2541	uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2542		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2543	};
2544
2545	NFE_LOCK_ASSERT(sc);
2546
2547	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2548		bzero(addr, ETHER_ADDR_LEN);
2549		bzero(mask, ETHER_ADDR_LEN);
2550		goto done;
2551	}
2552
2553	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
2554	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
2555
2556	if_maddr_rlock(ifp);
2557	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2558		u_char *addrp;
2559
2560		if (ifma->ifma_addr->sa_family != AF_LINK)
2561			continue;
2562
2563		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2564		for (i = 0; i < ETHER_ADDR_LEN; i++) {
2565			u_int8_t mcaddr = addrp[i];
2566			addr[i] &= mcaddr;
2567			mask[i] &= ~mcaddr;
2568		}
2569	}
2570	if_maddr_runlock(ifp);
2571
2572	for (i = 0; i < ETHER_ADDR_LEN; i++) {
2573		mask[i] |= addr[i];
2574	}
2575
2576done:
2577	addr[0] |= 0x01;	/* make sure multicast bit is set */
2578
2579	NFE_WRITE(sc, NFE_MULTIADDR_HI,
2580	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2581	NFE_WRITE(sc, NFE_MULTIADDR_LO,
2582	    addr[5] <<  8 | addr[4]);
2583	NFE_WRITE(sc, NFE_MULTIMASK_HI,
2584	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
2585	NFE_WRITE(sc, NFE_MULTIMASK_LO,
2586	    mask[5] <<  8 | mask[4]);
2587
2588	filter = NFE_READ(sc, NFE_RXFILTER);
2589	filter &= NFE_PFF_RX_PAUSE;
2590	filter |= NFE_RXFILTER_MAGIC;
2591	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2592	NFE_WRITE(sc, NFE_RXFILTER, filter);
2593}
2594
2595
2596static void
2597nfe_start(struct ifnet *ifp)
2598{
2599	struct nfe_softc *sc = ifp->if_softc;
2600
2601	NFE_LOCK(sc);
2602	nfe_start_locked(ifp);
2603	NFE_UNLOCK(sc);
2604}
2605
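/*
 * Dequeue frames from the interface send queue, encapsulate them and kick
 * the transmitter.  If encapsulation fails and the mbuf survives, it is
 * put back at the head of the queue and IFF_DRV_OACTIVE is set until Tx
 * completion makes room in the ring.
 */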
2606static void
2607nfe_start_locked(struct ifnet *ifp)
2608{
2609	struct nfe_softc *sc = ifp->if_softc;
2610	struct mbuf *m0;
2611	int enq;
2612
2613	NFE_LOCK_ASSERT(sc);
2614
2615	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2616	    IFF_DRV_RUNNING || sc->nfe_link == 0)
2617		return;
2618
2619	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
2620		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2621		if (m0 == NULL)
2622			break;
2623
2624		if (nfe_encap(sc, &m0) != 0) {
2625			if (m0 == NULL)
2626				break;
2627			IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2628			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2629			break;
2630		}
2631		enq++;
2632		ETHER_BPF_MTAP(ifp, m0);
2633	}
2634
2635	if (enq > 0) {
2636		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2637		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2638
2639		/* kick Tx */
2640		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2641
2642		/*
2643		 * Set a timeout in case the chip goes out to lunch.
2644		 */
2645		sc->nfe_watchdog_timer = 5;
2646	}
2647}
2648
2649
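/*
 * Tx watchdog.  Recovery is attempted in stages: first assume a lost Tx
 * completion interrupt and reclaim the ring, then assume a lost "kick"
 * and re-issue the start command a few times, and only as a last resort
 * reset and reinitialize the interface.
 */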
2650static void
2651nfe_watchdog(struct ifnet *ifp)
2652{
2653	struct nfe_softc *sc = ifp->if_softc;
2654
2655	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2656		return;
2657
2658	/* Check if we've lost Tx completion interrupt. */
2659	nfe_txeof(sc);
2660	if (sc->txq.queued == 0) {
2661		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2662		    "-- recovering\n");
2663		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2664			nfe_start_locked(ifp);
2665		return;
2666	}
2667	/* Check if we've lost start Tx command. */
2668	sc->nfe_force_tx++;
2669	if (sc->nfe_force_tx <= 3) {
2670		/*
2671		 * If this is the case for watchdog timeout, the following
2672		 * code should go to nfe_txeof().
2673		 */
2674		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2675		return;
2676	}
2677	sc->nfe_force_tx = 0;
2678
2679	if_printf(ifp, "watchdog timeout\n");
2680
2681	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2682	ifp->if_oerrors++;
2683	nfe_init_locked(sc);
2684}
2685
2686
2687static void
2688nfe_init(void *xsc)
2689{
2690	struct nfe_softc *sc = xsc;
2691
2692	NFE_LOCK(sc);
2693	nfe_init_locked(sc);
2694	NFE_UNLOCK(sc);
2695}
2696
2697
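/*
 * Hardware initialization: stop the chip, (re)build the Tx and Rx rings
 * (the jumbo ring when the MTU requires it), program the MAC address,
 * ring addresses and sizes, interrupt moderation and Rx filter, then
 * enable the receiver, transmitter and interrupts and restart the MII
 * tick.
 */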
2698static void
2699nfe_init_locked(void *xsc)
2700{
2701	struct nfe_softc *sc = xsc;
2702	struct ifnet *ifp = sc->nfe_ifp;
2703	struct mii_data *mii;
2704	uint32_t val;
2705	int error;
2706
2707	NFE_LOCK_ASSERT(sc);
2708
2709	mii = device_get_softc(sc->nfe_miibus);
2710
2711	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2712		return;
2713
2714	nfe_stop(ifp);
2715
2716	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
2717
2718	nfe_init_tx_ring(sc, &sc->txq);
2719	if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2720		error = nfe_init_jrx_ring(sc, &sc->jrxq);
2721	else
2722		error = nfe_init_rx_ring(sc, &sc->rxq);
2723	if (error != 0) {
2724		device_printf(sc->nfe_dev,
2725		    "initialization failed: no memory for rx buffers\n");
2726		nfe_stop(ifp);
2727		return;
2728	}
2729
2730	val = 0;
2731	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2732		val |= NFE_MAC_ADDR_INORDER;
2733	NFE_WRITE(sc, NFE_TX_UNK, val);
2734	NFE_WRITE(sc, NFE_STATUS, 0);
2735
2736	if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2737		NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2738
2739	sc->rxtxctl = NFE_RXTX_BIT2;
2740	if (sc->nfe_flags & NFE_40BIT_ADDR)
2741		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2742	else if (sc->nfe_flags & NFE_JUMBO_SUP)
2743		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2744
2745	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2746		sc->rxtxctl |= NFE_RXTX_RXCSUM;
2747	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2748		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2749
2750	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2751	DELAY(10);
2752	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2753
2754	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2755		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2756	else
2757		NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2758
2759	NFE_WRITE(sc, NFE_SETUP_R6, 0);
2760
2761	/* set MAC address */
2762	nfe_set_macaddr(sc, IF_LLADDR(ifp));
2763
2764	/* tell MAC where rings are in memory */
2765	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2766		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2767		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
2768		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2769		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
2770	} else {
2771		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2772		    NFE_ADDR_HI(sc->rxq.physaddr));
2773		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2774		    NFE_ADDR_LO(sc->rxq.physaddr));
2775	}
2776	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2777	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
2778
2779	NFE_WRITE(sc, NFE_RING_SIZE,
2780	    (NFE_RX_RING_COUNT - 1) << 16 |
2781	    (NFE_TX_RING_COUNT - 1));
2782
2783	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2784
2785	/* force MAC to wakeup */
2786	val = NFE_READ(sc, NFE_PWR_STATE);
2787	if ((val & NFE_PWR_WAKEUP) == 0)
2788		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2789	DELAY(10);
2790	val = NFE_READ(sc, NFE_PWR_STATE);
2791	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2792
2793#if 1
2794	/* configure interrupts coalescing/mitigation */
2795	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2796#else
2797	/* no interrupt mitigation: one interrupt per packet */
2798	NFE_WRITE(sc, NFE_IMTIMER, 970);
2799#endif
2800
2801	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2802	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2803	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2804
2805	/* update MAC knowledge of PHY; generates an NFE_IRQ_LINK interrupt */
2806	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2807
2808	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2809	/* Disable WOL. */
2810	NFE_WRITE(sc, NFE_WOL_CTL, 0);
2811
2812	sc->rxtxctl &= ~NFE_RXTX_BIT2;
2813	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2814	DELAY(10);
2815	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2816
2817	/* set Rx filter */
2818	nfe_setmulti(sc);
2819
2820	/* enable Rx */
2821	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2822
2823	/* enable Tx */
2824	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2825
2826	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2827
2828	/* Clear hardware stats. */
2829	nfe_stats_clear(sc);
2830
2831#ifdef DEVICE_POLLING
2832	if (ifp->if_capenable & IFCAP_POLLING)
2833		nfe_disable_intr(sc);
2834	else
2835#endif
2836	nfe_set_intr(sc);
2837	nfe_enable_intr(sc); /* enable interrupts */
2838
2839	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2840	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2841
2842	sc->nfe_link = 0;
2843	mii_mediachg(mii);
2844
2845	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2846}
2847
2848
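/*
 * Stop the interface: cancel the tick callout, halt Rx/Tx, disable
 * interrupts and release any mbufs still held by the Rx, jumbo Rx and
 * Tx rings.
 */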
2849static void
2850nfe_stop(struct ifnet *ifp)
2851{
2852	struct nfe_softc *sc = ifp->if_softc;
2853	struct nfe_rx_ring *rx_ring;
2854	struct nfe_jrx_ring *jrx_ring;
2855	struct nfe_tx_ring *tx_ring;
2856	struct nfe_rx_data *rdata;
2857	struct nfe_tx_data *tdata;
2858	int i;
2859
2860	NFE_LOCK_ASSERT(sc);
2861
2862	sc->nfe_watchdog_timer = 0;
2863	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2864
2865	callout_stop(&sc->nfe_stat_ch);
2866
2867	/* abort Tx */
2868	NFE_WRITE(sc, NFE_TX_CTL, 0);
2869
2870	/* disable Rx */
2871	NFE_WRITE(sc, NFE_RX_CTL, 0);
2872
2873	/* disable interrupts */
2874	nfe_disable_intr(sc);
2875
2876	sc->nfe_link = 0;
2877
2878	/* free Rx and Tx mbufs still in the queues. */
2879	rx_ring = &sc->rxq;
2880	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2881		rdata = &rx_ring->data[i];
2882		if (rdata->m != NULL) {
2883			bus_dmamap_sync(rx_ring->rx_data_tag,
2884			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2885			bus_dmamap_unload(rx_ring->rx_data_tag,
2886			    rdata->rx_data_map);
2887			m_freem(rdata->m);
2888			rdata->m = NULL;
2889		}
2890	}
2891
2892	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2893		jrx_ring = &sc->jrxq;
2894		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2895			rdata = &jrx_ring->jdata[i];
2896			if (rdata->m != NULL) {
2897				bus_dmamap_sync(jrx_ring->jrx_data_tag,
2898				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2899				bus_dmamap_unload(jrx_ring->jrx_data_tag,
2900				    rdata->rx_data_map);
2901				m_freem(rdata->m);
2902				rdata->m = NULL;
2903			}
2904		}
2905	}
2906
2907	tx_ring = &sc->txq;
2908	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
2909		tdata = &tx_ring->data[i];
2910		if (tdata->m != NULL) {
2911			bus_dmamap_sync(tx_ring->tx_data_tag,
2912			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2913			bus_dmamap_unload(tx_ring->tx_data_tag,
2914			    tdata->tx_data_map);
2915			m_freem(tdata->m);
2916			tdata->m = NULL;
2917		}
2918	}
2919	/* Update hardware stats. */
2920	nfe_stats_update(sc);
2921}
2922
2923
2924static int
2925nfe_ifmedia_upd(struct ifnet *ifp)
2926{
2927	struct nfe_softc *sc = ifp->if_softc;
2928	struct mii_data *mii;
2929
2930	NFE_LOCK(sc);
2931	mii = device_get_softc(sc->nfe_miibus);
2932	mii_mediachg(mii);
2933	NFE_UNLOCK(sc);
2934
2935	return (0);
2936}
2937
2938
2939static void
2940nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2941{
2942	struct nfe_softc *sc;
2943	struct mii_data *mii;
2944
2945	sc = ifp->if_softc;
2946
2947	NFE_LOCK(sc);
2948	mii = device_get_softc(sc->nfe_miibus);
2949	mii_pollstat(mii);
2950
2951	ifmr->ifm_active = mii->mii_media_active;
2952	ifmr->ifm_status = mii->mii_media_status;
2953	NFE_UNLOCK(sc);
2954}
2955
2956
2957void
2958nfe_tick(void *xsc)
2959{
2960	struct nfe_softc *sc;
2961	struct mii_data *mii;
2962	struct ifnet *ifp;
2963
2964	sc = (struct nfe_softc *)xsc;
2965
2966	NFE_LOCK_ASSERT(sc);
2967
2968	ifp = sc->nfe_ifp;
2969
2970	mii = device_get_softc(sc->nfe_miibus);
2971	mii_tick(mii);
2972	nfe_stats_update(sc);
2973	nfe_watchdog(ifp);
2974	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2975}
2976
2977
2978static int
2979nfe_shutdown(device_t dev)
2980{
2981
2982	return (nfe_suspend(dev));
2983}
2984
2985
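/*
 * Read the station address from NFE_MACADDR_HI/LO.  Controllers without
 * the NFE_CORRECT_MACADDR flag store the address in reversed byte order,
 * so the bytes are swapped while reading.
 */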
2986static void
2987nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2988{
2989	uint32_t val;
2990
2991	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
2992		val = NFE_READ(sc, NFE_MACADDR_LO);
2993		addr[0] = (val >> 8) & 0xff;
2994		addr[1] = (val & 0xff);
2995
2996		val = NFE_READ(sc, NFE_MACADDR_HI);
2997		addr[2] = (val >> 24) & 0xff;
2998		addr[3] = (val >> 16) & 0xff;
2999		addr[4] = (val >>  8) & 0xff;
3000		addr[5] = (val & 0xff);
3001	} else {
3002		val = NFE_READ(sc, NFE_MACADDR_LO);
3003		addr[5] = (val >> 8) & 0xff;
3004		addr[4] = (val & 0xff);
3005
3006		val = NFE_READ(sc, NFE_MACADDR_HI);
3007		addr[3] = (val >> 24) & 0xff;
3008		addr[2] = (val >> 16) & 0xff;
3009		addr[1] = (val >>  8) & 0xff;
3010		addr[0] = (val & 0xff);
3011	}
3012}
3013
3014
3015static void
3016nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
3017{
3018
3019	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
3020	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
3021	    addr[1] << 8 | addr[0]);
3022}
3023
3024
3025/*
3026 * Map a single buffer address.
3027 */
3028
3029static void
3030nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3031{
3032	struct nfe_dmamap_arg *ctx;
3033
3034	if (error != 0)
3035		return;
3036
3037	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
3038
3039	ctx = (struct nfe_dmamap_arg *)arg;
3040	ctx->nfe_busaddr = segs[0].ds_addr;
3041}
3042
3043
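/*
 * Generic sysctl handler for an integer tunable constrained to the range
 * [low, high]; values outside the range are rejected with EINVAL.
 */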
3044static int
3045sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3046{
3047	int error, value;
3048
3049	if (!arg1)
3050		return (EINVAL);
3051	value = *(int *)arg1;
3052	error = sysctl_handle_int(oidp, &value, 0, req);
3053	if (error || !req->newptr)
3054		return (error);
3055	if (value < low || value > high)
3056		return (EINVAL);
3057	*(int *)arg1 = value;
3058
3059	return (0);
3060}
3061
3062
3063static int
3064sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3065{
3066
3067	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
3068	    NFE_PROC_MAX));
3069}
3070
3071
3072#define	NFE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
3073	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
3074#define	NFE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
3075	    SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
3076
3077static void
3078nfe_sysctl_node(struct nfe_softc *sc)
3079{
3080	struct sysctl_ctx_list *ctx;
3081	struct sysctl_oid_list *child, *parent;
3082	struct sysctl_oid *tree;
3083	struct nfe_hw_stats *stats;
3084	int error;
3085
3086	stats = &sc->nfe_stats;
3087	ctx = device_get_sysctl_ctx(sc->nfe_dev);
3088	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
3089	SYSCTL_ADD_PROC(ctx, child,
3090	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
3091	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
3092	    "max number of Rx events to process");
3093
3094	sc->nfe_process_limit = NFE_PROC_DEFAULT;
3095	error = resource_int_value(device_get_name(sc->nfe_dev),
3096	    device_get_unit(sc->nfe_dev), "process_limit",
3097	    &sc->nfe_process_limit);
3098	if (error == 0) {
3099		if (sc->nfe_process_limit < NFE_PROC_MIN ||
3100		    sc->nfe_process_limit > NFE_PROC_MAX) {
3101			device_printf(sc->nfe_dev,
3102			    "process_limit value out of range; "
3103			    "using default: %d\n", NFE_PROC_DEFAULT);
3104			sc->nfe_process_limit = NFE_PROC_DEFAULT;
3105		}
3106	}
3107
3108	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3109		return;
3110
3111	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
3112	    NULL, "NFE statistics");
3113	parent = SYSCTL_CHILDREN(tree);
3114
3115	/* Rx statistics. */
3116	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
3117	    NULL, "Rx MAC statistics");
3118	child = SYSCTL_CHILDREN(tree);
3119
3120	NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
3121	    &stats->rx_frame_errors, "Framing Errors");
3122	NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
3123	    &stats->rx_extra_bytes, "Extra Bytes");
3124	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3125	    &stats->rx_late_cols, "Late Collisions");
3126	NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
3127	    &stats->rx_runts, "Runts");
3128	NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
3129	    &stats->rx_jumbos, "Jumbos");
3130	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
3131	    &stats->rx_fifo_overuns, "FIFO Overruns");
3132	NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
3133	    &stats->rx_crc_errors, "CRC Errors");
3134	NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
3135	    &stats->rx_fae, "Frame Alignment Errors");
3136	NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
3137	    &stats->rx_len_errors, "Length Errors");
3138	NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3139	    &stats->rx_unicast, "Unicast Frames");
3140	NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3141	    &stats->rx_multicast, "Multicast Frames");
3142	NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3143	    &stats->rx_broadcast, "Broadcast Frames");
3144	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3145		NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3146		    &stats->rx_octets, "Octets");
3147		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3148		    &stats->rx_pause, "Pause frames");
3149		NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
3150		    &stats->rx_drops, "Drop frames");
3151	}
3152
3153	/* Tx statistics. */
3154	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
3155	    NULL, "Tx MAC statistics");
3156	child = SYSCTL_CHILDREN(tree);
3157	NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3158	    &stats->tx_octets, "Octets");
3159	NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
3160	    &stats->tx_zero_rexmits, "Zero Retransmits");
3161	NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
3162	    &stats->tx_one_rexmits, "One Retransmits");
3163	NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
3164	    &stats->tx_multi_rexmits, "Multiple Retransmits");
3165	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3166	    &stats->tx_late_cols, "Late Collisions");
3167	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
3168	    &stats->tx_fifo_underuns, "FIFO Underruns");
3169	NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
3170	    &stats->tx_carrier_losts, "Carrier Losts");
3171	NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
3172	    &stats->tx_excess_deferals, "Excess Deferrals");
3173	NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
3174	    &stats->tx_retry_errors, "Retry Errors");
3175	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3176		NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
3177		    &stats->tx_deferals, "Deferrals");
3178		NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
3179		    &stats->tx_frames, "Frames");
3180		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3181		    &stats->tx_pause, "Pause Frames");
3182	}
3183	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3184		NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3185		    &stats->tx_unicast, "Unicast Frames");
3186		NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3187		    &stats->tx_multicast, "Multicast Frames");
3188		NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3189		    &stats->tx_broadcast, "Broadcast Frames");
3190	}
3191}
3192
3193#undef NFE_SYSCTL_STAT_ADD32
3194#undef NFE_SYSCTL_STAT_ADD64
3195
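/*
 * Reset the hardware MIB counters.  The counters appear to be cleared by
 * the read itself, so each register is read once and the value discarded.
 */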
3196static void
3197nfe_stats_clear(struct nfe_softc *sc)
3198{
3199	int i, mib_cnt;
3200
3201	if ((sc->nfe_flags & NFE_MIB_V1) != 0)
3202		mib_cnt = NFE_NUM_MIB_STATV1;
3203	else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
3204		mib_cnt = NFE_NUM_MIB_STATV2;
3205	else
3206		return;
3207
3208	for (i = 0; i < mib_cnt; i += sizeof(uint32_t))
3209		NFE_READ(sc, NFE_TX_OCTET + i);
3210
3211	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3212		NFE_READ(sc, NFE_TX_UNICAST);
3213		NFE_READ(sc, NFE_TX_MULTICAST);
3214		NFE_READ(sc, NFE_TX_BROADCAST);
3215	}
3216}
3217
3218static void
3219nfe_stats_update(struct nfe_softc *sc)
3220{
3221	struct nfe_hw_stats *stats;
3222
3223	NFE_LOCK_ASSERT(sc);
3224
3225	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3226		return;
3227
3228	stats = &sc->nfe_stats;
3229	stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
3230	stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
3231	stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
3232	stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
3233	stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
3234	stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
3235	stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
3236	stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
3237	stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
3238	stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
3239	stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
3240	stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
3241	stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
3242	stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
3243	stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
3244	stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
3245	stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
3246	stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
3247	stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
3248	stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
3249	stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
3250
3251	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3252		stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
3253		stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
3254		stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
3255		stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
3256		stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
3257		stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
3258	}
3259
3260	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3261		stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
3262		stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
3263		stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
3264	}
3265}
3266
3267
3268static void
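/*
 * Used when arming wake-on-LAN: make sure the link is 10/100Mbps.  If the
 * current link is gigabit, autonegotiation is restarted with only 10/100
 * capabilities advertised; if no such link can be established the MAC is
 * forced to 100Mbps full-duplex as a last resort.
 */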
3269nfe_set_linkspeed(struct nfe_softc *sc)
3270{
3271	struct mii_softc *miisc;
3272	struct mii_data *mii;
3273	int aneg, i, phyno;
3274
3275	NFE_LOCK_ASSERT(sc);
3276
3277	mii = device_get_softc(sc->nfe_miibus);
3278	mii_pollstat(mii);
3279	aneg = 0;
3280	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3281	    (IFM_ACTIVE | IFM_AVALID)) {
3282		switch (IFM_SUBTYPE(mii->mii_media_active)) {
3283		case IFM_10_T:
3284		case IFM_100_TX:
3285			return;
3286		case IFM_1000_T:
3287			aneg++;
3288			break;
3289		default:
3290			break;
3291		}
3292	}
3293	miisc = LIST_FIRST(&mii->mii_phys);
3294	phyno = miisc->mii_phy;
3295	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3296		PHY_RESET(miisc);
3297	nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
3298	nfe_miibus_writereg(sc->nfe_dev, phyno,
3299	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3300	nfe_miibus_writereg(sc->nfe_dev, phyno,
3301	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
3302	DELAY(1000);
3303	if (aneg != 0) {
3304		/*
3305		 * Poll link state until nfe(4) gets a 10/100Mbps link.
3306		 */
3307		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3308			mii_pollstat(mii);
3309			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3310			    == (IFM_ACTIVE | IFM_AVALID)) {
3311				switch (IFM_SUBTYPE(mii->mii_media_active)) {
3312				case IFM_10_T:
3313				case IFM_100_TX:
3314					nfe_mac_config(sc, mii);
3315					return;
3316				default:
3317					break;
3318				}
3319			}
3320			NFE_UNLOCK(sc);
3321			pause("nfelnk", hz);
3322			NFE_LOCK(sc);
3323		}
3324		if (i == MII_ANEGTICKS_GIGE)
3325			device_printf(sc->nfe_dev,
3326			    "establishing a link failed, WOL may not work!\n");
3327	}
3328	/*
3329	 * No link; force the MAC to a 100Mbps, full-duplex link.
3330	 * This is the last resort and may or may not work.
3331	 */
3332	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3333	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3334	nfe_mac_config(sc, mii);
3335}
3336
3337
3338static void
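/*
 * Arm or disarm wake-on-LAN.  When magic-packet wakeup is enabled the
 * link is downshifted to 10/100, the receiver is kept enabled so the chip
 * can see the magic packet, and PME is enabled through PCI power
 * management; otherwise WOL and PME are cleared.
 */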
3339nfe_set_wol(struct nfe_softc *sc)
3340{
3341	struct ifnet *ifp;
3342	uint32_t wolctl;
3343	int pmc;
3344	uint16_t pmstat;
3345
3346	NFE_LOCK_ASSERT(sc);
3347
3348	if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
3349		return;
3350	ifp = sc->nfe_ifp;
3351	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
3352		wolctl = NFE_WOL_MAGIC;
3353	else
3354		wolctl = 0;
3355	NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
3356	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
3357		nfe_set_linkspeed(sc);
3358		if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
3359			NFE_WRITE(sc, NFE_PWR2_CTL,
3360			    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
3361		/* Enable RX. */
3362		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
3363		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
3364		NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
3365		    NFE_RX_START);
3366	}
3367	/* Request PME if WOL is requested. */
3368	pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
3369	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3370	if ((ifp->if_capenable & IFCAP_WOL) != 0)
3371		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3372	pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3373}
3374