if_nfe.c revision 213894
1/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/
2
3/*-
4 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
22
23#include <sys/cdefs.h>
24__FBSDID("$FreeBSD: head/sys/dev/nfe/if_nfe.c 213894 2010-10-15 15:00:30Z marius $");
25
26#ifdef HAVE_KERNEL_OPTION_HEADERS
27#include "opt_device_polling.h"
28#endif
29
30#include <sys/param.h>
31#include <sys/endian.h>
32#include <sys/systm.h>
33#include <sys/sockio.h>
34#include <sys/mbuf.h>
35#include <sys/malloc.h>
36#include <sys/module.h>
37#include <sys/kernel.h>
38#include <sys/queue.h>
39#include <sys/socket.h>
40#include <sys/sysctl.h>
41#include <sys/taskqueue.h>
42
43#include <net/if.h>
44#include <net/if_arp.h>
45#include <net/ethernet.h>
46#include <net/if_dl.h>
47#include <net/if_media.h>
48#include <net/if_types.h>
49#include <net/if_vlan_var.h>
50
51#include <net/bpf.h>
52
53#include <machine/bus.h>
54#include <machine/resource.h>
55#include <sys/bus.h>
56#include <sys/rman.h>
57
58#include <dev/mii/mii.h>
59#include <dev/mii/miivar.h>
60
61#include <dev/pci/pcireg.h>
62#include <dev/pci/pcivar.h>
63
64#include <dev/nfe/if_nfereg.h>
65#include <dev/nfe/if_nfevar.h>
66
67MODULE_DEPEND(nfe, pci, 1, 1, 1);
68MODULE_DEPEND(nfe, ether, 1, 1, 1);
69MODULE_DEPEND(nfe, miibus, 1, 1, 1);
70
71/* "device miibus" required.  See GENERIC if you get errors here. */
72#include "miibus_if.h"
73
74static int  nfe_probe(device_t);
75static int  nfe_attach(device_t);
76static int  nfe_detach(device_t);
77static int  nfe_suspend(device_t);
78static int  nfe_resume(device_t);
79static int nfe_shutdown(device_t);
80static void nfe_power(struct nfe_softc *);
81static int  nfe_miibus_readreg(device_t, int, int);
82static int  nfe_miibus_writereg(device_t, int, int, int);
83static void nfe_miibus_statchg(device_t);
84static void nfe_link_task(void *, int);
85static void nfe_set_intr(struct nfe_softc *);
86static __inline void nfe_enable_intr(struct nfe_softc *);
87static __inline void nfe_disable_intr(struct nfe_softc *);
88static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
89static void nfe_alloc_msix(struct nfe_softc *, int);
90static int nfe_intr(void *);
91static void nfe_int_task(void *, int);
92static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
93static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
94static int nfe_newbuf(struct nfe_softc *, int);
95static int nfe_jnewbuf(struct nfe_softc *, int);
96static int  nfe_rxeof(struct nfe_softc *, int, int *);
97static int  nfe_jrxeof(struct nfe_softc *, int, int *);
98static void nfe_txeof(struct nfe_softc *);
99static int  nfe_encap(struct nfe_softc *, struct mbuf **);
100static void nfe_setmulti(struct nfe_softc *);
101static void nfe_tx_task(void *, int);
102static void nfe_start(struct ifnet *);
103static void nfe_watchdog(struct ifnet *);
104static void nfe_init(void *);
105static void nfe_init_locked(void *);
106static void nfe_stop(struct ifnet *);
107static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
108static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
109static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
110static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
111static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
112static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
113static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
114static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
115static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
116static int  nfe_ifmedia_upd(struct ifnet *);
117static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
118static void nfe_tick(void *);
119static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
120static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
121static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
122
123static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
124static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
125static void nfe_sysctl_node(struct nfe_softc *);
126static void nfe_stats_clear(struct nfe_softc *);
127static void nfe_stats_update(struct nfe_softc *);
128
129#ifdef NFE_DEBUG
130static int nfedebug = 0;
131#define	DPRINTF(sc, ...)	do {				\
132	if (nfedebug)						\
133		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
134} while (0)
135#define	DPRINTFN(sc, n, ...)	do {				\
136	if (nfedebug >= (n))					\
137		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
138} while (0)
139#else
140#define	DPRINTF(sc, ...)
141#define	DPRINTFN(sc, n, ...)
142#endif
143
144#define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
145#define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
146#define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
147
148/* Tunables. */
149static int msi_disable = 0;
150static int msix_disable = 0;
151static int jumbo_disable = 0;
152TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
153TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
154TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
155
156static device_method_t nfe_methods[] = {
157	/* Device interface */
158	DEVMETHOD(device_probe,		nfe_probe),
159	DEVMETHOD(device_attach,	nfe_attach),
160	DEVMETHOD(device_detach,	nfe_detach),
161	DEVMETHOD(device_suspend,	nfe_suspend),
162	DEVMETHOD(device_resume,	nfe_resume),
163	DEVMETHOD(device_shutdown,	nfe_shutdown),
164
165	/* bus interface */
166	DEVMETHOD(bus_print_child,	bus_generic_print_child),
167	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
168
169	/* MII interface */
170	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
171	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
172	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),
173
174	{ NULL, NULL }
175};
176
177static driver_t nfe_driver = {
178	"nfe",
179	nfe_methods,
180	sizeof(struct nfe_softc)
181};
182
183static devclass_t nfe_devclass;
184
185DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
186DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
187
188static struct nfe_type nfe_devs[] = {
189	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
190	    "NVIDIA nForce MCP Networking Adapter"},
191	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
192	    "NVIDIA nForce2 MCP2 Networking Adapter"},
193	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
194	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
195	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
196	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
197	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
198	    "NVIDIA nForce3 MCP3 Networking Adapter"},
199	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
200	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
201	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
202	    "NVIDIA nForce3 MCP7 Networking Adapter"},
203	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
204	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
205	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
206	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
207	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
208	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
209	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
210	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
211	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
212	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
213	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
214	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
215	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
216	    "NVIDIA nForce MCP55 Networking Adapter"},
217	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
218	    "NVIDIA nForce MCP55 Networking Adapter"},
219	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
220	    "NVIDIA nForce MCP61 Networking Adapter"},
221	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
222	    "NVIDIA nForce MCP61 Networking Adapter"},
223	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
224	    "NVIDIA nForce MCP61 Networking Adapter"},
225	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
226	    "NVIDIA nForce MCP61 Networking Adapter"},
227	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
228	    "NVIDIA nForce MCP65 Networking Adapter"},
229	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
230	    "NVIDIA nForce MCP65 Networking Adapter"},
231	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
232	    "NVIDIA nForce MCP65 Networking Adapter"},
233	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
234	    "NVIDIA nForce MCP65 Networking Adapter"},
235	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
236	    "NVIDIA nForce MCP67 Networking Adapter"},
237	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
238	    "NVIDIA nForce MCP67 Networking Adapter"},
239	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
240	    "NVIDIA nForce MCP67 Networking Adapter"},
241	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
242	    "NVIDIA nForce MCP67 Networking Adapter"},
243	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
244	    "NVIDIA nForce MCP73 Networking Adapter"},
245	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
246	    "NVIDIA nForce MCP73 Networking Adapter"},
247	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
248	    "NVIDIA nForce MCP73 Networking Adapter"},
249	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
250	    "NVIDIA nForce MCP73 Networking Adapter"},
251	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
252	    "NVIDIA nForce MCP77 Networking Adapter"},
253	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
254	    "NVIDIA nForce MCP77 Networking Adapter"},
255	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
256	    "NVIDIA nForce MCP77 Networking Adapter"},
257	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
258	    "NVIDIA nForce MCP77 Networking Adapter"},
259	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
260	    "NVIDIA nForce MCP79 Networking Adapter"},
261	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
262	    "NVIDIA nForce MCP79 Networking Adapter"},
263	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
264	    "NVIDIA nForce MCP79 Networking Adapter"},
265	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
266	    "NVIDIA nForce MCP79 Networking Adapter"},
267	{0, 0, NULL}
268};
269
270
271/* Probe for supported hardware IDs */
272static int
273nfe_probe(device_t dev)
274{
275	struct nfe_type *t;
276
277	t = nfe_devs;
278	/* Check for matching PCI device IDs */
279	while (t->name != NULL) {
280		if ((pci_get_vendor(dev) == t->vid_id) &&
281		    (pci_get_device(dev) == t->dev_id)) {
282			device_set_desc(dev, t->name);
283			return (BUS_PROBE_DEFAULT);
284		}
285		t++;
286	}
287
288	return (ENXIO);
289}
290
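/*
 * Map the MSI-X table (BAR 2) and PBA (BAR 3) and try to allocate the
 * MSI-X vectors; sc->nfe_msix is set only if all NFE_MSI_MESSAGES
 * vectors are granted.
 */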
291static void
292nfe_alloc_msix(struct nfe_softc *sc, int count)
293{
294	int rid;
295
296	rid = PCIR_BAR(2);
297	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
298	    &rid, RF_ACTIVE);
299	if (sc->nfe_msix_res == NULL) {
300		device_printf(sc->nfe_dev,
301		    "couldn't allocate MSIX table resource\n");
302		return;
303	}
304	rid = PCIR_BAR(3);
305	sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
306	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
307	if (sc->nfe_msix_pba_res == NULL) {
308		device_printf(sc->nfe_dev,
309		    "couldn't allocate MSIX PBA resource\n");
310		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
311		    sc->nfe_msix_res);
312		sc->nfe_msix_res = NULL;
313		return;
314	}
315
316	if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
317		if (count == NFE_MSI_MESSAGES) {
318			if (bootverbose)
319				device_printf(sc->nfe_dev,
320				    "Using %d MSIX messages\n", count);
321			sc->nfe_msix = 1;
322		} else {
323			if (bootverbose)
324				device_printf(sc->nfe_dev,
325				    "couldn't allocate MSIX\n");
326			pci_release_msi(sc->nfe_dev);
327			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
328			    PCIR_BAR(3), sc->nfe_msix_pba_res);
329			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
330			    PCIR_BAR(2), sc->nfe_msix_res);
331			sc->nfe_msix_pba_res = NULL;
332			sc->nfe_msix_res = NULL;
333		}
334	}
335}
336
337static int
338nfe_attach(device_t dev)
339{
340	struct nfe_softc *sc;
341	struct ifnet *ifp;
342	bus_addr_t dma_addr_max;
343	int error = 0, i, msic, reg, rid;
344
345	sc = device_get_softc(dev);
346	sc->nfe_dev = dev;
347
348	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
349	    MTX_DEF);
350	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
351	TASK_INIT(&sc->nfe_link_task, 0, nfe_link_task, sc);
352
353	pci_enable_busmaster(dev);
354
355	rid = PCIR_BAR(0);
356	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
357	    RF_ACTIVE);
358	if (sc->nfe_res[0] == NULL) {
359		device_printf(dev, "couldn't map memory resources\n");
360		mtx_destroy(&sc->nfe_mtx);
361		return (ENXIO);
362	}
363
364	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
365		uint16_t v, width;
366
367		v = pci_read_config(dev, reg + 0x08, 2);
368		/* Change max. read request size to 4096. */
369		v &= ~(7 << 12);
370		v |= (5 << 12);
371		pci_write_config(dev, reg + 0x08, v, 2);
372
373		v = pci_read_config(dev, reg + 0x0c, 2);
374		/* link capability */
375		v = (v >> 4) & 0x0f;
376		width = pci_read_config(dev, reg + 0x12, 2);
377		/* negotiated link width */
378		width = (width >> 4) & 0x3f;
379		if (v != width)
380			device_printf(sc->nfe_dev,
381			    "warning, negotiated width of link(x%d) != "
382			    "max. width of link(x%d)\n", width, v);
383	}
384
385	/* Allocate interrupt */
386	if (msix_disable == 0 || msi_disable == 0) {
387		if (msix_disable == 0 &&
388		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
389			nfe_alloc_msix(sc, msic);
390		if (msi_disable == 0 && sc->nfe_msix == 0 &&
391		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
392		    pci_alloc_msi(dev, &msic) == 0) {
393			if (msic == NFE_MSI_MESSAGES) {
394				if (bootverbose)
395					device_printf(dev,
396					    "Using %d MSI messages\n", msic);
397				sc->nfe_msi = 1;
398			} else
399				pci_release_msi(dev);
400		}
401	}
402
403	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
404		rid = 0;
405		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
406		    RF_SHAREABLE | RF_ACTIVE);
407		if (sc->nfe_irq[0] == NULL) {
408			device_printf(dev, "couldn't allocate IRQ resources\n");
409			error = ENXIO;
410			goto fail;
411		}
412	} else {
413		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
414			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
415			    SYS_RES_IRQ, &rid, RF_ACTIVE);
416			if (sc->nfe_irq[i] == NULL) {
417				device_printf(dev,
418				    "couldn't allocate IRQ resources for "
419				    "message %d\n", rid);
420				error = ENXIO;
421				goto fail;
422			}
423		}
424		/* Map interrupts to vector 0. */
425		if (sc->nfe_msix != 0) {
426			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
427			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
428		} else if (sc->nfe_msi != 0) {
429			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
430			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
431		}
432	}
433
434	/* Set IRQ status/mask register. */
435	sc->nfe_irq_status = NFE_IRQ_STATUS;
436	sc->nfe_irq_mask = NFE_IRQ_MASK;
437	sc->nfe_intrs = NFE_IRQ_WANTED;
438	sc->nfe_nointrs = 0;
439	if (sc->nfe_msix != 0) {
440		sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
441		sc->nfe_nointrs = NFE_IRQ_WANTED;
442	} else if (sc->nfe_msi != 0) {
443		sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
444		sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
445	}
446
447	sc->nfe_devid = pci_get_device(dev);
448	sc->nfe_revid = pci_get_revid(dev);
449	sc->nfe_flags = 0;
450
451	switch (sc->nfe_devid) {
452	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
453	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
454	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
455	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
456		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
457		break;
458	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
459	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
460		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
461		break;
462	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
463	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
464	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
465	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
466		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
467		    NFE_MIB_V1;
468		break;
469	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
470	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
471		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
472		    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
473		break;
474
475	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
476	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
477	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
478	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
479	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
480	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
481	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
482	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
483	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
484	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
485	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
486	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
487		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
488		    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
489		break;
490	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
491	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
492	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
493	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
494		/* XXX flow control */
495		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
496		    NFE_CORRECT_MACADDR | NFE_MIB_V3;
497		break;
498	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
499	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
500	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
501	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
502		/* XXX flow control */
503		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
504		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
505		break;
506	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
507	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
508	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
509	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
510		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
511		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
512		    NFE_MIB_V2;
513		break;
514	}
515
516	nfe_power(sc);
517	/* Check for reversed ethernet address */
518	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
519		sc->nfe_flags |= NFE_CORRECT_MACADDR;
520	nfe_get_macaddr(sc, sc->eaddr);
521	/*
522	 * Allocate the parent bus DMA tag appropriate for PCI.
523	 */
524	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
525	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
526		dma_addr_max = NFE_DMA_MAXADDR;
527	error = bus_dma_tag_create(
528	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
529	    1, 0,				/* alignment, boundary */
530	    dma_addr_max,			/* lowaddr */
531	    BUS_SPACE_MAXADDR,			/* highaddr */
532	    NULL, NULL,				/* filter, filterarg */
533	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
534	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
535	    0,					/* flags */
536	    NULL, NULL,				/* lockfunc, lockarg */
537	    &sc->nfe_parent_tag);
538	if (error)
539		goto fail;
540
541	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
542	if (ifp == NULL) {
543		device_printf(dev, "can not if_alloc()\n");
544		error = ENOSPC;
545		goto fail;
546	}
547	TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp);
548
549	/*
550	 * Allocate Tx and Rx rings.
551	 */
552	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
553		goto fail;
554
555	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
556		goto fail;
557
558	nfe_alloc_jrx_ring(sc, &sc->jrxq);
559	/* Create sysctl node. */
560	nfe_sysctl_node(sc);
561
562	ifp->if_softc = sc;
563	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
564	ifp->if_mtu = ETHERMTU;
565	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
566	ifp->if_ioctl = nfe_ioctl;
567	ifp->if_start = nfe_start;
568	ifp->if_hwassist = 0;
569	ifp->if_capabilities = 0;
570	ifp->if_init = nfe_init;
571	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
572	ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
573	IFQ_SET_READY(&ifp->if_snd);
574
575	if (sc->nfe_flags & NFE_HW_CSUM) {
576		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
577		ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
578	}
579	ifp->if_capenable = ifp->if_capabilities;
580
581	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
582	/* VLAN capability setup. */
583	ifp->if_capabilities |= IFCAP_VLAN_MTU;
584	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
585		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
586		if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
587			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
588	}
589	ifp->if_capenable = ifp->if_capabilities;
590
591	/*
592	 * Tell the upper layer(s) we support long frames.
593	 * Must appear after the call to ether_ifattach() because
594	 * ether_ifattach() sets ifi_hdrlen to the default value.
595	 */
596	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
597
598#ifdef DEVICE_POLLING
599	ifp->if_capabilities |= IFCAP_POLLING;
600#endif
601
602	/* Do MII setup */
603	error = mii_attach(dev, &sc->nfe_miibus, ifp, nfe_ifmedia_upd,
604	    nfe_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
605	if (error != 0) {
606		device_printf(dev, "attaching PHYs failed\n");
607		goto fail;
608	}
609	ether_ifattach(ifp, sc->eaddr);
610
611	TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
612	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
613	    taskqueue_thread_enqueue, &sc->nfe_tq);
614	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
615	    device_get_nameunit(sc->nfe_dev));
616	error = 0;
617	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
618		error = bus_setup_intr(dev, sc->nfe_irq[0],
619		    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
620		    &sc->nfe_intrhand[0]);
621	} else {
622		for (i = 0; i < NFE_MSI_MESSAGES; i++) {
623			error = bus_setup_intr(dev, sc->nfe_irq[i],
624			    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
625			    &sc->nfe_intrhand[i]);
626			if (error != 0)
627				break;
628		}
629	}
630	if (error) {
631		device_printf(dev, "couldn't set up irq\n");
632		taskqueue_free(sc->nfe_tq);
633		sc->nfe_tq = NULL;
634		ether_ifdetach(ifp);
635		goto fail;
636	}
637
638fail:
639	if (error)
640		nfe_detach(dev);
641
642	return (error);
643}
644
645
646static int
647nfe_detach(device_t dev)
648{
649	struct nfe_softc *sc;
650	struct ifnet *ifp;
651	uint8_t eaddr[ETHER_ADDR_LEN];
652	int i, rid;
653
654	sc = device_get_softc(dev);
655	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
656	ifp = sc->nfe_ifp;
657
658#ifdef DEVICE_POLLING
659	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
660		ether_poll_deregister(ifp);
661#endif
662	if (device_is_attached(dev)) {
663		NFE_LOCK(sc);
664		nfe_stop(ifp);
665		ifp->if_flags &= ~IFF_UP;
666		NFE_UNLOCK(sc);
667		callout_drain(&sc->nfe_stat_ch);
668		taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task);
669		taskqueue_drain(taskqueue_swi, &sc->nfe_link_task);
670		ether_ifdetach(ifp);
671	}
672
673	if (ifp) {
674		/* restore ethernet address */
675		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
676			for (i = 0; i < ETHER_ADDR_LEN; i++) {
677				eaddr[i] = sc->eaddr[5 - i];
678			}
679		} else
680			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
681		nfe_set_macaddr(sc, eaddr);
682		if_free(ifp);
683	}
684	if (sc->nfe_miibus)
685		device_delete_child(dev, sc->nfe_miibus);
686	bus_generic_detach(dev);
687	if (sc->nfe_tq != NULL) {
688		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
689		taskqueue_free(sc->nfe_tq);
690		sc->nfe_tq = NULL;
691	}
692
693	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
694		if (sc->nfe_intrhand[i] != NULL) {
695			bus_teardown_intr(dev, sc->nfe_irq[i],
696			    sc->nfe_intrhand[i]);
697			sc->nfe_intrhand[i] = NULL;
698		}
699	}
700
701	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
702		if (sc->nfe_irq[0] != NULL)
703			bus_release_resource(dev, SYS_RES_IRQ, 0,
704			    sc->nfe_irq[0]);
705	} else {
706		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
707			if (sc->nfe_irq[i] != NULL) {
708				bus_release_resource(dev, SYS_RES_IRQ, rid,
709				    sc->nfe_irq[i]);
710				sc->nfe_irq[i] = NULL;
711			}
712		}
713		pci_release_msi(dev);
714	}
715	if (sc->nfe_msix_pba_res != NULL) {
716		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
717		    sc->nfe_msix_pba_res);
718		sc->nfe_msix_pba_res = NULL;
719	}
720	if (sc->nfe_msix_res != NULL) {
721		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
722		    sc->nfe_msix_res);
723		sc->nfe_msix_res = NULL;
724	}
725	if (sc->nfe_res[0] != NULL) {
726		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
727		    sc->nfe_res[0]);
728		sc->nfe_res[0] = NULL;
729	}
730
731	nfe_free_tx_ring(sc, &sc->txq);
732	nfe_free_rx_ring(sc, &sc->rxq);
733	nfe_free_jrx_ring(sc, &sc->jrxq);
734
735	if (sc->nfe_parent_tag) {
736		bus_dma_tag_destroy(sc->nfe_parent_tag);
737		sc->nfe_parent_tag = NULL;
738	}
739
740	mtx_destroy(&sc->nfe_mtx);
741
742	return (0);
743}
744
745
746static int
747nfe_suspend(device_t dev)
748{
749	struct nfe_softc *sc;
750
751	sc = device_get_softc(dev);
752
753	NFE_LOCK(sc);
754	nfe_stop(sc->nfe_ifp);
755	sc->nfe_suspended = 1;
756	NFE_UNLOCK(sc);
757
758	return (0);
759}
760
761
762static int
763nfe_resume(device_t dev)
764{
765	struct nfe_softc *sc;
766	struct ifnet *ifp;
767
768	sc = device_get_softc(dev);
769
770	NFE_LOCK(sc);
771	ifp = sc->nfe_ifp;
772	if (ifp->if_flags & IFF_UP)
773		nfe_init_locked(sc);
774	sc->nfe_suspended = 0;
775	NFE_UNLOCK(sc);
776
777	return (0);
778}
779
780
781/* Take PHY/NIC out of powerdown, from Linux */
782static void
783nfe_power(struct nfe_softc *sc)
784{
785	uint32_t pwr;
786
787	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
788		return;
789	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
790	NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
791	DELAY(100);
792	NFE_WRITE(sc, NFE_MAC_RESET, 0);
793	DELAY(100);
794	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
795	pwr = NFE_READ(sc, NFE_PWR2_CTL);
796	pwr &= ~NFE_PWR2_WAKEUP_MASK;
797	if (sc->nfe_revid >= 0xa3 &&
798	    (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
799	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
800		pwr |= NFE_PWR2_REVA3;
801	NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
802}
803
804
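/* MII status change callback; defer the link handling to nfe_link_task. */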
805static void
806nfe_miibus_statchg(device_t dev)
807{
808	struct nfe_softc *sc;
809
810	sc = device_get_softc(dev);
811	taskqueue_enqueue(taskqueue_swi, &sc->nfe_link_task);
812}
813
814
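/*
 * Reprogram the MAC for the speed, duplex and flow control settings
 * negotiated by the PHY and start or stop the Tx/Rx engines accordingly.
 */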
815static void
816nfe_link_task(void *arg, int pending)
817{
818	struct nfe_softc *sc;
819	struct mii_data *mii;
820	struct ifnet *ifp;
821	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
822	uint32_t gmask, rxctl, txctl, val;
823
824	sc = (struct nfe_softc *)arg;
825
826	NFE_LOCK(sc);
827
828	mii = device_get_softc(sc->nfe_miibus);
829	ifp = sc->nfe_ifp;
830	if (mii == NULL || ifp == NULL) {
831		NFE_UNLOCK(sc);
832		return;
833	}
834
835	if (mii->mii_media_status & IFM_ACTIVE) {
836		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
837			sc->nfe_link = 1;
838	} else
839		sc->nfe_link = 0;
840
841	phy = NFE_READ(sc, NFE_PHY_IFACE);
842	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
843
844	seed = NFE_READ(sc, NFE_RNDSEED);
845	seed &= ~NFE_SEED_MASK;
846
847	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) {
848		phy  |= NFE_PHY_HDX;	/* half-duplex */
849		misc |= NFE_MISC1_HDX;
850	}
851
852	switch (IFM_SUBTYPE(mii->mii_media_active)) {
853	case IFM_1000_T:	/* full-duplex only */
854		link |= NFE_MEDIA_1000T;
855		seed |= NFE_SEED_1000T;
856		phy  |= NFE_PHY_1000T;
857		break;
858	case IFM_100_TX:
859		link |= NFE_MEDIA_100TX;
860		seed |= NFE_SEED_100TX;
861		phy  |= NFE_PHY_100TX;
862		break;
863	case IFM_10_T:
864		link |= NFE_MEDIA_10T;
865		seed |= NFE_SEED_10T;
866		break;
867	}
868
869	if ((phy & 0x10000000) != 0) {
870		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
871			val = NFE_R1_MAGIC_1000;
872		else
873			val = NFE_R1_MAGIC_10_100;
874	} else
875		val = NFE_R1_MAGIC_DEFAULT;
876	NFE_WRITE(sc, NFE_SETUP_R1, val);
877
878	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
879
880	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
881	NFE_WRITE(sc, NFE_MISC1, misc);
882	NFE_WRITE(sc, NFE_LINKSPEED, link);
883
884	gmask = mii->mii_media_active & IFM_GMASK;
885	if ((gmask & IFM_FDX) != 0) {
886		/* It seems all hardware supports Rx pause frames. */
887		val = NFE_READ(sc, NFE_RXFILTER);
888		if ((gmask & IFM_FLAG0) != 0)
889			val |= NFE_PFF_RX_PAUSE;
890		else
891			val &= ~NFE_PFF_RX_PAUSE;
892		NFE_WRITE(sc, NFE_RXFILTER, val);
893		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
894			val = NFE_READ(sc, NFE_MISC1);
895			if ((gmask & IFM_FLAG1) != 0) {
896				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
897				    NFE_TX_PAUSE_FRAME_ENABLE);
898				val |= NFE_MISC1_TX_PAUSE;
899			} else {
900				val &= ~NFE_MISC1_TX_PAUSE;
901				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
902				    NFE_TX_PAUSE_FRAME_DISABLE);
903			}
904			NFE_WRITE(sc, NFE_MISC1, val);
905		}
906	} else {
907		/* disable rx/tx pause frames */
908		val = NFE_READ(sc, NFE_RXFILTER);
909		val &= ~NFE_PFF_RX_PAUSE;
910		NFE_WRITE(sc, NFE_RXFILTER, val);
911		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
912			NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
913			    NFE_TX_PAUSE_FRAME_DISABLE);
914			val = NFE_READ(sc, NFE_MISC1);
915			val &= ~NFE_MISC1_TX_PAUSE;
916			NFE_WRITE(sc, NFE_MISC1, val);
917		}
918	}
919
920	txctl = NFE_READ(sc, NFE_TX_CTL);
921	rxctl = NFE_READ(sc, NFE_RX_CTL);
922	if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
923		txctl |= NFE_TX_START;
924		rxctl |= NFE_RX_START;
925	} else {
926		txctl &= ~NFE_TX_START;
927		rxctl &= ~NFE_RX_START;
928	}
929	NFE_WRITE(sc, NFE_TX_CTL, txctl);
930	NFE_WRITE(sc, NFE_RX_CTL, rxctl);
931
932	NFE_UNLOCK(sc);
933}
934
935
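/*
 * Read a PHY register through the MII management interface, polling
 * NFE_PHY_CTL until the busy bit clears.
 */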
936static int
937nfe_miibus_readreg(device_t dev, int phy, int reg)
938{
939	struct nfe_softc *sc = device_get_softc(dev);
940	uint32_t val;
941	int ntries;
942
943	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
944
945	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
946		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
947		DELAY(100);
948	}
949
950	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
951
952	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
953		DELAY(100);
954		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
955			break;
956	}
957	if (ntries == NFE_TIMEOUT) {
958		DPRINTFN(sc, 2, "timeout waiting for PHY\n");
959		return 0;
960	}
961
962	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
963		DPRINTFN(sc, 2, "could not read PHY\n");
964		return 0;
965	}
966
967	val = NFE_READ(sc, NFE_PHY_DATA);
968	if (val != 0xffffffff && val != 0)
969		sc->mii_phyaddr = phy;
970
971	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
972
973	return (val);
974}
975
976
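/* Write a PHY register through the MII management interface. */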
977static int
978nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
979{
980	struct nfe_softc *sc = device_get_softc(dev);
981	uint32_t ctl;
982	int ntries;
983
984	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
985
986	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
987		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
988		DELAY(100);
989	}
990
991	NFE_WRITE(sc, NFE_PHY_DATA, val);
992	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
993	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
994
995	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
996		DELAY(100);
997		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
998			break;
999	}
1000#ifdef NFE_DEBUG
1001	if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
1002		device_printf(sc->nfe_dev, "could not write to PHY\n");
1003#endif
1004	return (0);
1005}
1006
1007struct nfe_dmamap_arg {
1008	bus_addr_t nfe_busaddr;
1009};
1010
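/*
 * Create the descriptor DMA memory for the standard Rx ring and a DMA
 * map for each Rx buffer slot.
 */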
1011static int
1012nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1013{
1014	struct nfe_dmamap_arg ctx;
1015	struct nfe_rx_data *data;
1016	void *desc;
1017	int i, error, descsize;
1018
1019	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1020		desc = ring->desc64;
1021		descsize = sizeof (struct nfe_desc64);
1022	} else {
1023		desc = ring->desc32;
1024		descsize = sizeof (struct nfe_desc32);
1025	}
1026
1027	ring->cur = ring->next = 0;
1028
1029	error = bus_dma_tag_create(sc->nfe_parent_tag,
1030	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1031	    BUS_SPACE_MAXADDR,			/* lowaddr */
1032	    BUS_SPACE_MAXADDR,			/* highaddr */
1033	    NULL, NULL,				/* filter, filterarg */
1034	    NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
1035	    NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
1036	    0,					/* flags */
1037	    NULL, NULL,				/* lockfunc, lockarg */
1038	    &ring->rx_desc_tag);
1039	if (error != 0) {
1040		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1041		goto fail;
1042	}
1043
1044	/* Allocate memory for the descriptors. */
1045	error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
1046	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
1047	if (error != 0) {
1048		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1049		goto fail;
1050	}
1051	if (sc->nfe_flags & NFE_40BIT_ADDR)
1052		ring->desc64 = desc;
1053	else
1054		ring->desc32 = desc;
1055
1056	/* Map the descriptors into device-visible address space. */
1057	ctx.nfe_busaddr = 0;
1058	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
1059	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1060	if (error != 0) {
1061		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1062		goto fail;
1063	}
1064	ring->physaddr = ctx.nfe_busaddr;
1065
1066	error = bus_dma_tag_create(sc->nfe_parent_tag,
1067	    1, 0,			/* alignment, boundary */
1068	    BUS_SPACE_MAXADDR,		/* lowaddr */
1069	    BUS_SPACE_MAXADDR,		/* highaddr */
1070	    NULL, NULL,			/* filter, filterarg */
1071	    MCLBYTES, 1,		/* maxsize, nsegments */
1072	    MCLBYTES,			/* maxsegsize */
1073	    0,				/* flags */
1074	    NULL, NULL,			/* lockfunc, lockarg */
1075	    &ring->rx_data_tag);
1076	if (error != 0) {
1077		device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
1078		goto fail;
1079	}
1080
1081	error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
1082	if (error != 0) {
1083		device_printf(sc->nfe_dev,
1084		    "could not create Rx DMA spare map\n");
1085		goto fail;
1086	}
1087
1088	/*
1089	 * Create DMA maps for the Rx buffers that will populate the ring.
1090	 */
1091	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1092		data = &sc->rxq.data[i];
1093		data->rx_data_map = NULL;
1094		data->m = NULL;
1095		error = bus_dmamap_create(ring->rx_data_tag, 0,
1096		    &data->rx_data_map);
1097		if (error != 0) {
1098			device_printf(sc->nfe_dev,
1099			    "could not create Rx DMA map\n");
1100			goto fail;
1101		}
1102	}
1103
1104fail:
1105	return (error);
1106}
1107
1108
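/*
 * Like nfe_alloc_rx_ring() but for the jumbo Rx ring; a failure here
 * only disables jumbo frame support instead of failing the attach.
 */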
1109static void
1110nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1111{
1112	struct nfe_dmamap_arg ctx;
1113	struct nfe_rx_data *data;
1114	void *desc;
1115	int i, error, descsize;
1116
1117	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1118		return;
1119	if (jumbo_disable != 0) {
1120		device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
1121		sc->nfe_jumbo_disable = 1;
1122		return;
1123	}
1124
1125	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1126		desc = ring->jdesc64;
1127		descsize = sizeof (struct nfe_desc64);
1128	} else {
1129		desc = ring->jdesc32;
1130		descsize = sizeof (struct nfe_desc32);
1131	}
1132
1133	ring->jcur = ring->jnext = 0;
1134
1135	/* Create DMA tag for jumbo Rx ring. */
1136	error = bus_dma_tag_create(sc->nfe_parent_tag,
1137	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1138	    BUS_SPACE_MAXADDR,			/* lowaddr */
1139	    BUS_SPACE_MAXADDR,			/* highaddr */
1140	    NULL, NULL,				/* filter, filterarg */
1141	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsize */
1142	    1, 					/* nsegments */
1143	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsegsize */
1144	    0,					/* flags */
1145	    NULL, NULL,				/* lockfunc, lockarg */
1146	    &ring->jrx_desc_tag);
1147	if (error != 0) {
1148		device_printf(sc->nfe_dev,
1149		    "could not create jumbo ring DMA tag\n");
1150		goto fail;
1151	}
1152
1153	/* Create DMA tag for jumbo Rx buffers. */
1154	error = bus_dma_tag_create(sc->nfe_parent_tag,
1155	    1, 0,				/* alignment, boundary */
1156	    BUS_SPACE_MAXADDR,			/* lowaddr */
1157	    BUS_SPACE_MAXADDR,			/* highaddr */
1158	    NULL, NULL,				/* filter, filterarg */
1159	    MJUM9BYTES,				/* maxsize */
1160	    1,					/* nsegments */
1161	    MJUM9BYTES,				/* maxsegsize */
1162	    0,					/* flags */
1163	    NULL, NULL,				/* lockfunc, lockarg */
1164	    &ring->jrx_data_tag);
1165	if (error != 0) {
1166		device_printf(sc->nfe_dev,
1167		    "could not create jumbo Rx buffer DMA tag\n");
1168		goto fail;
1169	}
1170
1171	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
1172	error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
1173	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
1174	if (error != 0) {
1175		device_printf(sc->nfe_dev,
1176		    "could not allocate DMA'able memory for jumbo Rx ring\n");
1177		goto fail;
1178	}
1179	if (sc->nfe_flags & NFE_40BIT_ADDR)
1180		ring->jdesc64 = desc;
1181	else
1182		ring->jdesc32 = desc;
1183
1184	ctx.nfe_busaddr = 0;
1185	error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
1186	    NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1187	if (error != 0) {
1188		device_printf(sc->nfe_dev,
1189		    "could not load DMA'able memory for jumbo Rx ring\n");
1190		goto fail;
1191	}
1192	ring->jphysaddr = ctx.nfe_busaddr;
1193
1194	/* Create DMA maps for jumbo Rx buffers. */
1195	error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
1196	if (error != 0) {
1197		device_printf(sc->nfe_dev,
1198		    "could not create jumbo Rx DMA spare map\n");
1199		goto fail;
1200	}
1201
1202	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1203		data = &sc->jrxq.jdata[i];
1204		data->rx_data_map = NULL;
1205		data->m = NULL;
1206		error = bus_dmamap_create(ring->jrx_data_tag, 0,
1207		    &data->rx_data_map);
1208		if (error != 0) {
1209			device_printf(sc->nfe_dev,
1210			    "could not create jumbo Rx DMA map\n");
1211			goto fail;
1212		}
1213	}
1214
1215	return;
1216
1217fail:
1218	/*
1219	 * Running without jumbo frame support is OK for most cases,
1220	 * so don't fail if the DMA tag/map for jumbo frames cannot be created.
1221	 */
1222	nfe_free_jrx_ring(sc, ring);
1223	device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
1224	    "resource shortage\n");
1225	sc->nfe_jumbo_disable = 1;
1226}
1227
1228
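/*
 * Reset the Rx ring indices, clear the descriptors and fill every slot
 * with a fresh mbuf cluster.
 */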
1229static int
1230nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1231{
1232	void *desc;
1233	size_t descsize;
1234	int i;
1235
1236	ring->cur = ring->next = 0;
1237	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1238		desc = ring->desc64;
1239		descsize = sizeof (struct nfe_desc64);
1240	} else {
1241		desc = ring->desc32;
1242		descsize = sizeof (struct nfe_desc32);
1243	}
1244	bzero(desc, descsize * NFE_RX_RING_COUNT);
1245	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1246		if (nfe_newbuf(sc, i) != 0)
1247			return (ENOBUFS);
1248	}
1249
1250	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
1251	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1252
1253	return (0);
1254}
1255
1256
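/* Jumbo ring counterpart of nfe_init_rx_ring(). */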
1257static int
1258nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1259{
1260	void *desc;
1261	size_t descsize;
1262	int i;
1263
1264	ring->jcur = ring->jnext = 0;
1265	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1266		desc = ring->jdesc64;
1267		descsize = sizeof (struct nfe_desc64);
1268	} else {
1269		desc = ring->jdesc32;
1270		descsize = sizeof (struct nfe_desc32);
1271	}
1272	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
1273	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1274		if (nfe_jnewbuf(sc, i) != 0)
1275			return (ENOBUFS);
1276	}
1277
1278	bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
1279	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1280
1281	return (0);
1282}
1283
1284
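/*
 * Free the mbufs, DMA maps, descriptor memory and DMA tags owned by the
 * standard Rx ring.
 */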
1285static void
1286nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1287{
1288	struct nfe_rx_data *data;
1289	void *desc;
1290	int i, descsize;
1291
1292	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1293		desc = ring->desc64;
1294		descsize = sizeof (struct nfe_desc64);
1295	} else {
1296		desc = ring->desc32;
1297		descsize = sizeof (struct nfe_desc32);
1298	}
1299
1300	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1301		data = &ring->data[i];
1302		if (data->rx_data_map != NULL) {
1303			bus_dmamap_destroy(ring->rx_data_tag,
1304			    data->rx_data_map);
1305			data->rx_data_map = NULL;
1306		}
1307		if (data->m != NULL) {
1308			m_freem(data->m);
1309			data->m = NULL;
1310		}
1311	}
1312	if (ring->rx_data_tag != NULL) {
1313		if (ring->rx_spare_map != NULL) {
1314			bus_dmamap_destroy(ring->rx_data_tag,
1315			    ring->rx_spare_map);
1316			ring->rx_spare_map = NULL;
1317		}
1318		bus_dma_tag_destroy(ring->rx_data_tag);
1319		ring->rx_data_tag = NULL;
1320	}
1321
1322	if (desc != NULL) {
1323		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
1324		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
1325		ring->desc64 = NULL;
1326		ring->desc32 = NULL;
1327		ring->rx_desc_map = NULL;
1328	}
1329	if (ring->rx_desc_tag != NULL) {
1330		bus_dma_tag_destroy(ring->rx_desc_tag);
1331		ring->rx_desc_tag = NULL;
1332	}
1333}
1334
1335
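/* Free the jumbo Rx ring resources; a no-op without jumbo frame support. */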
1336static void
1337nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1338{
1339	struct nfe_rx_data *data;
1340	void *desc;
1341	int i, descsize;
1342
1343	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1344		return;
1345
1346	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1347		desc = ring->jdesc64;
1348		descsize = sizeof (struct nfe_desc64);
1349	} else {
1350		desc = ring->jdesc32;
1351		descsize = sizeof (struct nfe_desc32);
1352	}
1353
1354	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1355		data = &ring->jdata[i];
1356		if (data->rx_data_map != NULL) {
1357			bus_dmamap_destroy(ring->jrx_data_tag,
1358			    data->rx_data_map);
1359			data->rx_data_map = NULL;
1360		}
1361		if (data->m != NULL) {
1362			m_freem(data->m);
1363			data->m = NULL;
1364		}
1365	}
1366	if (ring->jrx_data_tag != NULL) {
1367		if (ring->jrx_spare_map != NULL) {
1368			bus_dmamap_destroy(ring->jrx_data_tag,
1369			    ring->jrx_spare_map);
1370			ring->jrx_spare_map = NULL;
1371		}
1372		bus_dma_tag_destroy(ring->jrx_data_tag);
1373		ring->jrx_data_tag = NULL;
1374	}
1375
1376	if (desc != NULL) {
1377		bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
1378		bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
1379		ring->jdesc64 = NULL;
1380		ring->jdesc32 = NULL;
1381		ring->jrx_desc_map = NULL;
1382	}
1383
1384	if (ring->jrx_desc_tag != NULL) {
1385		bus_dma_tag_destroy(ring->jrx_desc_tag);
1386		ring->jrx_desc_tag = NULL;
1387	}
1388}
1389
1390
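/*
 * Create the descriptor DMA memory for the Tx ring and a DMA map for
 * each Tx slot.
 */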
1391static int
1392nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1393{
1394	struct nfe_dmamap_arg ctx;
1395	int i, error;
1396	void *desc;
1397	int descsize;
1398
1399	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1400		desc = ring->desc64;
1401		descsize = sizeof (struct nfe_desc64);
1402	} else {
1403		desc = ring->desc32;
1404		descsize = sizeof (struct nfe_desc32);
1405	}
1406
1407	ring->queued = 0;
1408	ring->cur = ring->next = 0;
1409
1410	error = bus_dma_tag_create(sc->nfe_parent_tag,
1411	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1412	    BUS_SPACE_MAXADDR,			/* lowaddr */
1413	    BUS_SPACE_MAXADDR,			/* highaddr */
1414	    NULL, NULL,				/* filter, filterarg */
1415	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
1416	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
1417	    0,					/* flags */
1418	    NULL, NULL,				/* lockfunc, lockarg */
1419	    &ring->tx_desc_tag);
1420	if (error != 0) {
1421		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1422		goto fail;
1423	}
1424
1425	error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
1426	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
1427	if (error != 0) {
1428		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1429		goto fail;
1430	}
1431	if (sc->nfe_flags & NFE_40BIT_ADDR)
1432		ring->desc64 = desc;
1433	else
1434		ring->desc32 = desc;
1435
1436	ctx.nfe_busaddr = 0;
1437	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
1438	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1439	if (error != 0) {
1440		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1441		goto fail;
1442	}
1443	ring->physaddr = ctx.nfe_busaddr;
1444
1445	error = bus_dma_tag_create(sc->nfe_parent_tag,
1446	    1, 0,
1447	    BUS_SPACE_MAXADDR,
1448	    BUS_SPACE_MAXADDR,
1449	    NULL, NULL,
1450	    NFE_TSO_MAXSIZE,
1451	    NFE_MAX_SCATTER,
1452	    NFE_TSO_MAXSGSIZE,
1453	    0,
1454	    NULL, NULL,
1455	    &ring->tx_data_tag);
1456	if (error != 0) {
1457		device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
1458		goto fail;
1459	}
1460
1461	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1462		error = bus_dmamap_create(ring->tx_data_tag, 0,
1463		    &ring->data[i].tx_data_map);
1464		if (error != 0) {
1465			device_printf(sc->nfe_dev,
1466			    "could not create Tx DMA map\n");
1467			goto fail;
1468		}
1469	}
1470
1471fail:
1472	return (error);
1473}
1474
1475
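/* Reset the Tx ring state and zero all Tx descriptors. */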
1476static void
1477nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1478{
1479	void *desc;
1480	size_t descsize;
1481
1482	sc->nfe_force_tx = 0;
1483	ring->queued = 0;
1484	ring->cur = ring->next = 0;
1485	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1486		desc = ring->desc64;
1487		descsize = sizeof (struct nfe_desc64);
1488	} else {
1489		desc = ring->desc32;
1490		descsize = sizeof (struct nfe_desc32);
1491	}
1492	bzero(desc, descsize * NFE_TX_RING_COUNT);
1493
1494	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1495	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1496}
1497
1498
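/*
 * Free any pending Tx mbufs and release the Tx ring DMA maps,
 * descriptor memory and tags.
 */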
1499static void
1500nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1501{
1502	struct nfe_tx_data *data;
1503	void *desc;
1504	int i, descsize;
1505
1506	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1507		desc = ring->desc64;
1508		descsize = sizeof (struct nfe_desc64);
1509	} else {
1510		desc = ring->desc32;
1511		descsize = sizeof (struct nfe_desc32);
1512	}
1513
1514	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1515		data = &ring->data[i];
1516
1517		if (data->m != NULL) {
1518			bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
1519			    BUS_DMASYNC_POSTWRITE);
1520			bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
1521			m_freem(data->m);
1522			data->m = NULL;
1523		}
1524		if (data->tx_data_map != NULL) {
1525			bus_dmamap_destroy(ring->tx_data_tag,
1526			    data->tx_data_map);
1527			data->tx_data_map = NULL;
1528		}
1529	}
1530
1531	if (ring->tx_data_tag != NULL) {
1532		bus_dma_tag_destroy(ring->tx_data_tag);
1533		ring->tx_data_tag = NULL;
1534	}
1535
1536	if (desc != NULL) {
1537		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1538		    BUS_DMASYNC_POSTWRITE);
1539		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1540		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1541		ring->desc64 = NULL;
1542		ring->desc32 = NULL;
1543		ring->tx_desc_map = NULL;
1544		bus_dma_tag_destroy(ring->tx_desc_tag);
1545		ring->tx_desc_tag = NULL;
1546	}
1547}
1548
1549#ifdef DEVICE_POLLING
1550static poll_handler_t nfe_poll;
1551
1552
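/*
 * Polled-mode handler: service the Rx and Tx rings without interrupts
 * and, for POLL_AND_CHECK_STATUS, also check for link state changes.
 */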
1553static int
1554nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1555{
1556	struct nfe_softc *sc = ifp->if_softc;
1557	uint32_t r;
1558	int rx_npkts = 0;
1559
1560	NFE_LOCK(sc);
1561
1562	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1563		NFE_UNLOCK(sc);
1564		return (rx_npkts);
1565	}
1566
1567	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1568		rx_npkts = nfe_jrxeof(sc, count, &rx_npkts);
1569	else
1570		rx_npkts = nfe_rxeof(sc, count, &rx_npkts);
1571	nfe_txeof(sc);
1572	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1573		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1574
1575	if (cmd == POLL_AND_CHECK_STATUS) {
1576		if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1577			NFE_UNLOCK(sc);
1578			return (rx_npkts);
1579		}
1580		NFE_WRITE(sc, sc->nfe_irq_status, r);
1581
1582		if (r & NFE_IRQ_LINK) {
1583			NFE_READ(sc, NFE_PHY_STATUS);
1584			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1585			DPRINTF(sc, "link state changed\n");
1586		}
1587	}
1588	NFE_UNLOCK(sc);
1589	return (rx_npkts);
1590}
1591#endif /* DEVICE_POLLING */
1592
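/* For MSI, program the wanted interrupt sources into the IRQ mask register. */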
1593static void
1594nfe_set_intr(struct nfe_softc *sc)
1595{
1596
1597	if (sc->nfe_msi != 0)
1598		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1599}
1600
1601
1602/* In MSIX, a write to the mask registers behaves as XOR. */
1603static __inline void
1604nfe_enable_intr(struct nfe_softc *sc)
1605{
1606
1607	if (sc->nfe_msix != 0) {
1608		/* XXX Should have a better way to enable interrupts! */
1609		if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
1610			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1611	} else
1612		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1613}
1614
1615
1616static __inline void
1617nfe_disable_intr(struct nfe_softc *sc)
1618{
1619
1620	if (sc->nfe_msix != 0) {
1621		/* XXX Should have a better way to disable interrupts! */
1622		if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
1623			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1624	} else
1625		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1626}
1627
1628
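/*
 * Handle interface ioctls: MTU changes, interface flags, multicast
 * filter updates, media requests and capability toggles.
 */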
1629static int
1630nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1631{
1632	struct nfe_softc *sc;
1633	struct ifreq *ifr;
1634	struct mii_data *mii;
1635	int error, init, mask;
1636
1637	sc = ifp->if_softc;
1638	ifr = (struct ifreq *) data;
1639	error = 0;
1640	init = 0;
1641	switch (cmd) {
1642	case SIOCSIFMTU:
1643		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
1644			error = EINVAL;
1645		else if (ifp->if_mtu != ifr->ifr_mtu) {
1646			if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
1647			    (sc->nfe_jumbo_disable != 0)) &&
1648			    ifr->ifr_mtu > ETHERMTU)
1649				error = EINVAL;
1650			else {
1651				NFE_LOCK(sc);
1652				ifp->if_mtu = ifr->ifr_mtu;
1653				if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1654					nfe_init_locked(sc);
1655				NFE_UNLOCK(sc);
1656			}
1657		}
1658		break;
1659	case SIOCSIFFLAGS:
1660		NFE_LOCK(sc);
1661		if (ifp->if_flags & IFF_UP) {
1662			/*
1663			 * If only the PROMISC or ALLMULTI flag changes, then
1664			 * don't do a full re-init of the chip, just update
1665			 * the Rx filter.
1666			 */
1667			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1668			    ((ifp->if_flags ^ sc->nfe_if_flags) &
1669			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1670				nfe_setmulti(sc);
1671			else
1672				nfe_init_locked(sc);
1673		} else {
1674			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1675				nfe_stop(ifp);
1676		}
1677		sc->nfe_if_flags = ifp->if_flags;
1678		NFE_UNLOCK(sc);
1679		error = 0;
1680		break;
1681	case SIOCADDMULTI:
1682	case SIOCDELMULTI:
1683		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1684			NFE_LOCK(sc);
1685			nfe_setmulti(sc);
1686			NFE_UNLOCK(sc);
1687			error = 0;
1688		}
1689		break;
1690	case SIOCSIFMEDIA:
1691	case SIOCGIFMEDIA:
1692		mii = device_get_softc(sc->nfe_miibus);
1693		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1694		break;
1695	case SIOCSIFCAP:
1696		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1697#ifdef DEVICE_POLLING
1698		if ((mask & IFCAP_POLLING) != 0) {
1699			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1700				error = ether_poll_register(nfe_poll, ifp);
1701				if (error)
1702					break;
1703				NFE_LOCK(sc);
1704				nfe_disable_intr(sc);
1705				ifp->if_capenable |= IFCAP_POLLING;
1706				NFE_UNLOCK(sc);
1707			} else {
1708				error = ether_poll_deregister(ifp);
1709				/* Enable interrupts even in the error case */
1710				NFE_LOCK(sc);
1711				nfe_enable_intr(sc);
1712				ifp->if_capenable &= ~IFCAP_POLLING;
1713				NFE_UNLOCK(sc);
1714			}
1715		}
1716#endif /* DEVICE_POLLING */
1717		if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1718		    (mask & IFCAP_HWCSUM) != 0) {
1719			ifp->if_capenable ^= IFCAP_HWCSUM;
1720			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
1721			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
1722				ifp->if_hwassist |= NFE_CSUM_FEATURES;
1723			else
1724				ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
1725			init++;
1726		}
1727		if ((sc->nfe_flags & NFE_HW_VLAN) != 0 &&
1728		    (mask & IFCAP_VLAN_HWTAGGING) != 0) {
1729			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1730			init++;
1731		}
1732		/*
1733		 * XXX
1734		 * It seems that VLAN stripping requires Rx checksum offload.
1735		 * Unfortunately FreeBSD has no way to disable only Rx side
1736		 * VLAN stripping. So when we know Rx checksum offload is
1737		 * disabled, turn the entire hardware VLAN assist off.
1738		 */
1739		if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) ==
1740		    (NFE_HW_CSUM | NFE_HW_VLAN)) {
1741			if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
1742				ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING;
1743		}
1744
1745		if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1746		    (mask & IFCAP_TSO4) != 0) {
1747			ifp->if_capenable ^= IFCAP_TSO4;
1748			if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
1749			    (IFCAP_TSO4 & ifp->if_capabilities) != 0)
1750				ifp->if_hwassist |= CSUM_TSO;
1751			else
1752				ifp->if_hwassist &= ~CSUM_TSO;
1753		}
1754
1755		if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1756			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1757			nfe_init(sc);
1758		}
1759		if ((sc->nfe_flags & NFE_HW_VLAN) != 0)
1760			VLAN_CAPABILITIES(ifp);
1761		break;
1762	default:
1763		error = ether_ioctl(ifp, cmd, data);
1764		break;
1765	}
1766
1767	return (error);
1768}
1769
1770
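/*
 * Interrupt filter: if the interrupt is ours, mask further interrupts
 * and schedule nfe_int_task to do the real work.
 */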
1771static int
1772nfe_intr(void *arg)
1773{
1774	struct nfe_softc *sc;
1775	uint32_t status;
1776
1777	sc = (struct nfe_softc *)arg;
1778
1779	status = NFE_READ(sc, sc->nfe_irq_status);
1780	if (status == 0 || status == 0xffffffff)
1781		return (FILTER_STRAY);
1782	nfe_disable_intr(sc);
1783	taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1784
1785	return (FILTER_HANDLED);
1786}
1787
1788
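/*
 * Deferred interrupt handler: acknowledge the interrupt status, handle
 * link changes, service the Rx and Tx rings and re-enable interrupts
 * when no more work is pending.
 */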
1789static void
1790nfe_int_task(void *arg, int pending)
1791{
1792	struct nfe_softc *sc = arg;
1793	struct ifnet *ifp = sc->nfe_ifp;
1794	uint32_t r;
1795	int domore;
1796
1797	NFE_LOCK(sc);
1798
1799	if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1800		nfe_enable_intr(sc);
1801		NFE_UNLOCK(sc);
1802		return;	/* not for us */
1803	}
1804	NFE_WRITE(sc, sc->nfe_irq_status, r);
1805
1806	DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
1807
1808#ifdef DEVICE_POLLING
1809	if (ifp->if_capenable & IFCAP_POLLING) {
1810		NFE_UNLOCK(sc);
1811		return;
1812	}
1813#endif
1814
1815	if (r & NFE_IRQ_LINK) {
1816		NFE_READ(sc, NFE_PHY_STATUS);
1817		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1818		DPRINTF(sc, "link state changed\n");
1819	}
1820
1821	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1822		NFE_UNLOCK(sc);
1823		nfe_enable_intr(sc);
1824		return;
1825	}
1826
1827	domore = 0;
1828	/* check Rx ring */
1829	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1830		domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
1831	else
1832		domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
1833	/* check Tx ring */
1834	nfe_txeof(sc);
1835
1836	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1837		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1838
1839	NFE_UNLOCK(sc);
1840
1841	if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1842		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1843		return;
1844	}
1845
1846	/* Reenable interrupts. */
1847	nfe_enable_intr(sc);
1848}
1849
1850
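/* Recycle the mbuf already attached to Rx slot idx by re-arming its descriptor. */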
1851static __inline void
1852nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1853{
1854	struct nfe_desc32 *desc32;
1855	struct nfe_desc64 *desc64;
1856	struct nfe_rx_data *data;
1857	struct mbuf *m;
1858
1859	data = &sc->rxq.data[idx];
1860	m = data->m;
1861
1862	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1863		desc64 = &sc->rxq.desc64[idx];
1864		/* VLAN packet may have overwritten it. */
1865		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1866		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1867		desc64->length = htole16(m->m_len);
1868		desc64->flags = htole16(NFE_RX_READY);
1869	} else {
1870		desc32 = &sc->rxq.desc32[idx];
1871		desc32->length = htole16(m->m_len);
1872		desc32->flags = htole16(NFE_RX_READY);
1873	}
1874}
1875
1876
1877static __inline void
1878nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1879{
1880	struct nfe_desc32 *desc32;
1881	struct nfe_desc64 *desc64;
1882	struct nfe_rx_data *data;
1883	struct mbuf *m;
1884
1885	data = &sc->jrxq.jdata[idx];
1886	m = data->m;
1887
1888	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1889		desc64 = &sc->jrxq.jdesc64[idx];
1890		/* VLAN packet may have overwritten it. */
1891		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1892		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1893		desc64->length = htole16(m->m_len);
1894		desc64->flags = htole16(NFE_RX_READY);
1895	} else {
1896		desc32 = &sc->jrxq.jdesc32[idx];
1897		desc32->length = htole16(m->m_len);
1898		desc32->flags = htole16(NFE_RX_READY);
1899	}
1900}
1901
1902
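/*
 * Attach a fresh mbuf cluster to Rx slot 'idx'.  The cluster is loaded into
 * the ring's spare DMA map first; only when that succeeds is the old buffer
 * unloaded and the two maps swapped, so an allocation or mapping failure
 * leaves the slot with its previous, still-valid mbuf.  m_adj() by
 * ETHER_ALIGN keeps the IP header 32-bit aligned.  nfe_jnewbuf() below is
 * the same logic for 9k jumbo clusters.
 */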
1903static int
1904nfe_newbuf(struct nfe_softc *sc, int idx)
1905{
1906	struct nfe_rx_data *data;
1907	struct nfe_desc32 *desc32;
1908	struct nfe_desc64 *desc64;
1909	struct mbuf *m;
1910	bus_dma_segment_t segs[1];
1911	bus_dmamap_t map;
1912	int nsegs;
1913
1914	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1915	if (m == NULL)
1916		return (ENOBUFS);
1917
1918	m->m_len = m->m_pkthdr.len = MCLBYTES;
1919	m_adj(m, ETHER_ALIGN);
1920
1921	if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
1922	    m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1923		m_freem(m);
1924		return (ENOBUFS);
1925	}
1926	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1927
1928	data = &sc->rxq.data[idx];
1929	if (data->m != NULL) {
1930		bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1931		    BUS_DMASYNC_POSTREAD);
1932		bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
1933	}
1934	map = data->rx_data_map;
1935	data->rx_data_map = sc->rxq.rx_spare_map;
1936	sc->rxq.rx_spare_map = map;
1937	bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1938	    BUS_DMASYNC_PREREAD);
1939	data->paddr = segs[0].ds_addr;
1940	data->m = m;
1941	/* update mapping address in h/w descriptor */
1942	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1943		desc64 = &sc->rxq.desc64[idx];
1944		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
1945		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
1946		desc64->length = htole16(segs[0].ds_len);
1947		desc64->flags = htole16(NFE_RX_READY);
1948	} else {
1949		desc32 = &sc->rxq.desc32[idx];
1950		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
1951		desc32->length = htole16(segs[0].ds_len);
1952		desc32->flags = htole16(NFE_RX_READY);
1953	}
1954
1955	return (0);
1956}
1957
1958
1959static int
1960nfe_jnewbuf(struct nfe_softc *sc, int idx)
1961{
1962	struct nfe_rx_data *data;
1963	struct nfe_desc32 *desc32;
1964	struct nfe_desc64 *desc64;
1965	struct mbuf *m;
1966	bus_dma_segment_t segs[1];
1967	bus_dmamap_t map;
1968	int nsegs;
1969
1970	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1971	if (m == NULL)
1972		return (ENOBUFS);
1973	if ((m->m_flags & M_EXT) == 0) {
1974		m_freem(m);
1975		return (ENOBUFS);
1976	}
1977	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
1978	m_adj(m, ETHER_ALIGN);
1979
1980	if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
1981	    sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1982		m_freem(m);
1983		return (ENOBUFS);
1984	}
1985	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1986
1987	data = &sc->jrxq.jdata[idx];
1988	if (data->m != NULL) {
1989		bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
1990		    BUS_DMASYNC_POSTREAD);
1991		bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
1992	}
1993	map = data->rx_data_map;
1994	data->rx_data_map = sc->jrxq.jrx_spare_map;
1995	sc->jrxq.jrx_spare_map = map;
1996	bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
1997	    BUS_DMASYNC_PREREAD);
1998	data->paddr = segs[0].ds_addr;
1999	data->m = m;
2000	/* update mapping address in h/w descriptor */
2001	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2002		desc64 = &sc->jrxq.jdesc64[idx];
2003		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2004		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2005		desc64->length = htole16(segs[0].ds_len);
2006		desc64->flags = htole16(NFE_RX_READY);
2007	} else {
2008		desc32 = &sc->jrxq.jdesc32[idx];
2009		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2010		desc32->length = htole16(segs[0].ds_len);
2011		desc32->flags = htole16(NFE_RX_READY);
2012	}
2013
2014	return (0);
2015}
2016
2017
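/*
 * Rx completion for the standard ring; 'count' is the budget of frames to
 * process.  The loop stops at the first descriptor that still has
 * NFE_RX_READY set, i.e. one still owned by the chip.  On 40-bit parts the
 * hardware hands back the stripped VLAN tag in physaddr[1], which is why
 * nfe_discard_rxbuf() has to restore the address when a buffer is recycled.
 * The softc lock is dropped around if_input() so the stack is not entered
 * with the driver lock held.  EAGAIN is returned when the budget ran out so
 * the caller can reschedule; nfe_jrxeof() is the jumbo-ring twin.
 */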
2018static int
2019nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2020{
2021	struct ifnet *ifp = sc->nfe_ifp;
2022	struct nfe_desc32 *desc32;
2023	struct nfe_desc64 *desc64;
2024	struct nfe_rx_data *data;
2025	struct mbuf *m;
2026	uint16_t flags;
2027	int len, prog, rx_npkts;
2028	uint32_t vtag = 0;
2029
2030	rx_npkts = 0;
2031	NFE_LOCK_ASSERT(sc);
2032
2033	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2034	    BUS_DMASYNC_POSTREAD);
2035
2036	for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2037		if (count <= 0)
2038			break;
2039		count--;
2040
2041		data = &sc->rxq.data[sc->rxq.cur];
2042
2043		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2044			desc64 = &sc->rxq.desc64[sc->rxq.cur];
2045			vtag = le32toh(desc64->physaddr[1]);
2046			flags = le16toh(desc64->flags);
2047			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2048		} else {
2049			desc32 = &sc->rxq.desc32[sc->rxq.cur];
2050			flags = le16toh(desc32->flags);
2051			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2052		}
2053
2054		if (flags & NFE_RX_READY)
2055			break;
2056		prog++;
2057		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2058			if (!(flags & NFE_RX_VALID_V1)) {
2059				ifp->if_ierrors++;
2060				nfe_discard_rxbuf(sc, sc->rxq.cur);
2061				continue;
2062			}
2063			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2064				flags &= ~NFE_RX_ERROR;
2065				len--;	/* fix buffer length */
2066			}
2067		} else {
2068			if (!(flags & NFE_RX_VALID_V2)) {
2069				ifp->if_ierrors++;
2070				nfe_discard_rxbuf(sc, sc->rxq.cur);
2071				continue;
2072			}
2073
2074			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2075				flags &= ~NFE_RX_ERROR;
2076				len--;	/* fix buffer length */
2077			}
2078		}
2079
2080		if (flags & NFE_RX_ERROR) {
2081			ifp->if_ierrors++;
2082			nfe_discard_rxbuf(sc, sc->rxq.cur);
2083			continue;
2084		}
2085
2086		m = data->m;
2087		if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2088			ifp->if_iqdrops++;
2089			nfe_discard_rxbuf(sc, sc->rxq.cur);
2090			continue;
2091		}
2092
2093		if ((vtag & NFE_RX_VTAG) != 0 &&
2094		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2095			m->m_pkthdr.ether_vtag = vtag & 0xffff;
2096			m->m_flags |= M_VLANTAG;
2097		}
2098
2099		m->m_pkthdr.len = m->m_len = len;
2100		m->m_pkthdr.rcvif = ifp;
2101
2102		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2103			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2104				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2105				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2106				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2107				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
2108					m->m_pkthdr.csum_flags |=
2109					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2110					m->m_pkthdr.csum_data = 0xffff;
2111				}
2112			}
2113		}
2114
2115		ifp->if_ipackets++;
2116
2117		NFE_UNLOCK(sc);
2118		(*ifp->if_input)(ifp, m);
2119		NFE_LOCK(sc);
2120		rx_npkts++;
2121	}
2122
2123	if (prog > 0)
2124		bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2125		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2126
2127	if (rx_npktsp != NULL)
2128		*rx_npktsp = rx_npkts;
2129	return (count > 0 ? 0 : EAGAIN);
2130}
2131
2132
2133static int
2134nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2135{
2136	struct ifnet *ifp = sc->nfe_ifp;
2137	struct nfe_desc32 *desc32;
2138	struct nfe_desc64 *desc64;
2139	struct nfe_rx_data *data;
2140	struct mbuf *m;
2141	uint16_t flags;
2142	int len, prog, rx_npkts;
2143	uint32_t vtag = 0;
2144
2145	rx_npkts = 0;
2146	NFE_LOCK_ASSERT(sc);
2147
2148	bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2149	    BUS_DMASYNC_POSTREAD);
2150
2151	for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2152	    vtag = 0) {
2153		if (count <= 0)
2154			break;
2155		count--;
2156
2157		data = &sc->jrxq.jdata[sc->jrxq.jcur];
2158
2159		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2160			desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2161			vtag = le32toh(desc64->physaddr[1]);
2162			flags = le16toh(desc64->flags);
2163			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2164		} else {
2165			desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2166			flags = le16toh(desc32->flags);
2167			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2168		}
2169
2170		if (flags & NFE_RX_READY)
2171			break;
2172		prog++;
2173		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2174			if (!(flags & NFE_RX_VALID_V1)) {
2175				ifp->if_ierrors++;
2176				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2177				continue;
2178			}
2179			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2180				flags &= ~NFE_RX_ERROR;
2181				len--;	/* fix buffer length */
2182			}
2183		} else {
2184			if (!(flags & NFE_RX_VALID_V2)) {
2185				ifp->if_ierrors++;
2186				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2187				continue;
2188			}
2189
2190			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2191				flags &= ~NFE_RX_ERROR;
2192				len--;	/* fix buffer length */
2193			}
2194		}
2195
2196		if (flags & NFE_RX_ERROR) {
2197			ifp->if_ierrors++;
2198			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2199			continue;
2200		}
2201
2202		m = data->m;
2203		if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2204			ifp->if_iqdrops++;
2205			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2206			continue;
2207		}
2208
2209		if ((vtag & NFE_RX_VTAG) != 0 &&
2210		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2211			m->m_pkthdr.ether_vtag = vtag & 0xffff;
2212			m->m_flags |= M_VLANTAG;
2213		}
2214
2215		m->m_pkthdr.len = m->m_len = len;
2216		m->m_pkthdr.rcvif = ifp;
2217
2218		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2219			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2220				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2221				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2222				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2223				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
2224					m->m_pkthdr.csum_flags |=
2225					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2226					m->m_pkthdr.csum_data = 0xffff;
2227				}
2228			}
2229		}
2230
2231		ifp->if_ipackets++;
2232
2233		NFE_UNLOCK(sc);
2234		(*ifp->if_input)(ifp, m);
2235		NFE_LOCK(sc);
2236		rx_npkts++;
2237	}
2238
2239	if (prog > 0)
2240		bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2241		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2242
2243	if (rx_npktsp != NULL)
2244		*rx_npktsp = rx_npkts;
2245	return (count > 0 ? 0 : EAGAIN);
2246}
2247
2248
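/*
 * Tx completion: walk the ring from 'next' to 'cur' and stop at the first
 * descriptor that still has NFE_TX_VALID set (still owned by the chip).
 * Error/status bits are only meaningful on the last fragment of a chain,
 * which is also where nfe_encap() recorded the mbuf and its DMA map, so
 * intermediate fragments are simply skipped.  When anything was reclaimed,
 * OACTIVE is cleared and, once the ring is empty, the watchdog is disarmed.
 */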
2249static void
2250nfe_txeof(struct nfe_softc *sc)
2251{
2252	struct ifnet *ifp = sc->nfe_ifp;
2253	struct nfe_desc32 *desc32;
2254	struct nfe_desc64 *desc64;
2255	struct nfe_tx_data *data = NULL;
2256	uint16_t flags;
2257	int cons, prog;
2258
2259	NFE_LOCK_ASSERT(sc);
2260
2261	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2262	    BUS_DMASYNC_POSTREAD);
2263
2264	prog = 0;
2265	for (cons = sc->txq.next; cons != sc->txq.cur;
2266	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
2267		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2268			desc64 = &sc->txq.desc64[cons];
2269			flags = le16toh(desc64->flags);
2270		} else {
2271			desc32 = &sc->txq.desc32[cons];
2272			flags = le16toh(desc32->flags);
2273		}
2274
2275		if (flags & NFE_TX_VALID)
2276			break;
2277
2278		prog++;
2279		sc->txq.queued--;
2280		data = &sc->txq.data[cons];
2281
2282		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2283			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2284				continue;
2285			if ((flags & NFE_TX_ERROR_V1) != 0) {
2286				device_printf(sc->nfe_dev,
2287				    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2288
2289				ifp->if_oerrors++;
2290			} else
2291				ifp->if_opackets++;
2292		} else {
2293			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2294				continue;
2295			if ((flags & NFE_TX_ERROR_V2) != 0) {
2296				device_printf(sc->nfe_dev,
2297				    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2298				ifp->if_oerrors++;
2299			} else
2300				ifp->if_opackets++;
2301		}
2302
2303		/* last fragment of the mbuf chain transmitted */
2304		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2305		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2306		    BUS_DMASYNC_POSTWRITE);
2307		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2308		m_freem(data->m);
2309		data->m = NULL;
2310	}
2311
2312	if (prog > 0) {
2313		sc->nfe_force_tx = 0;
2314		sc->txq.next = cons;
2315		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2316		if (sc->txq.queued == 0)
2317			sc->nfe_watchdog_timer = 0;
2318	}
2319}
2320
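/*
 * Map and queue one mbuf chain for transmission.  A chain needing more than
 * NFE_MAX_SCATTER segments is m_collapse()d and loaded again, and a small
 * reserve of descriptors is always kept free.  Checksum/TSO flags and the
 * VLAN tag go into the first descriptor only, NFE_TX_LASTFRAG_* marks the
 * final one, and the first descriptor's NFE_TX_VALID bit is written last so
 * the chip never sees a half-built chain.  For TSO the MSS is pre-shifted
 * by NFE_TX_TSO_SHIFT and split across the first descriptor: the low 16
 * bits are OR'd into 'length' and the remaining bits into 'flags'.  The
 * mbuf pointer and the loaded DMA map are recorded at the last descriptor.
 */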
2321static int
2322nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2323{
2324	struct nfe_desc32 *desc32 = NULL;
2325	struct nfe_desc64 *desc64 = NULL;
2326	bus_dmamap_t map;
2327	bus_dma_segment_t segs[NFE_MAX_SCATTER];
2328	int error, i, nsegs, prod, si;
2329	uint32_t tso_segsz;
2330	uint16_t cflags, flags;
2331	struct mbuf *m;
2332
2333	prod = si = sc->txq.cur;
2334	map = sc->txq.data[prod].tx_data_map;
2335
2336	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2337	    &nsegs, BUS_DMA_NOWAIT);
2338	if (error == EFBIG) {
2339		m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
2340		if (m == NULL) {
2341			m_freem(*m_head);
2342			*m_head = NULL;
2343			return (ENOBUFS);
2344		}
2345		*m_head = m;
2346		error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2347		    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2348		if (error != 0) {
2349			m_freem(*m_head);
2350			*m_head = NULL;
2351			return (ENOBUFS);
2352		}
2353	} else if (error != 0)
2354		return (error);
2355	if (nsegs == 0) {
2356		m_freem(*m_head);
2357		*m_head = NULL;
2358		return (EIO);
2359	}
2360
2361	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2362		bus_dmamap_unload(sc->txq.tx_data_tag, map);
2363		return (ENOBUFS);
2364	}
2365
2366	m = *m_head;
2367	cflags = flags = 0;
2368	tso_segsz = 0;
2369	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2370		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2371		    NFE_TX_TSO_SHIFT;
2372		cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2373		cflags |= NFE_TX_TSO;
2374	} else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2375		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2376			cflags |= NFE_TX_IP_CSUM;
2377		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2378			cflags |= NFE_TX_TCP_UDP_CSUM;
2379		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2380			cflags |= NFE_TX_TCP_UDP_CSUM;
2381	}
2382
2383	for (i = 0; i < nsegs; i++) {
2384		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2385			desc64 = &sc->txq.desc64[prod];
2386			desc64->physaddr[0] =
2387			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
2388			desc64->physaddr[1] =
2389			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
2390			desc64->vtag = 0;
2391			desc64->length = htole16(segs[i].ds_len - 1);
2392			desc64->flags = htole16(flags);
2393		} else {
2394			desc32 = &sc->txq.desc32[prod];
2395			desc32->physaddr =
2396			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
2397			desc32->length = htole16(segs[i].ds_len - 1);
2398			desc32->flags = htole16(flags);
2399		}
2400
2401		/*
2402		 * Setting of the valid bit in the first descriptor is
2403		 * deferred until the whole chain is fully setup.
2404		 */
2405		flags |= NFE_TX_VALID;
2406
2407		sc->txq.queued++;
2408		NFE_INC(prod, NFE_TX_RING_COUNT);
2409	}
2410
2411	/*
2412	 * the whole mbuf chain has been DMA mapped, fix last/first descriptor.
2413	 * csum flags, vtag and TSO belong to the first fragment only.
2414	 */
2415	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2416		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2417		desc64 = &sc->txq.desc64[si];
2418		if ((m->m_flags & M_VLANTAG) != 0)
2419			desc64->vtag = htole32(NFE_TX_VTAG |
2420			    m->m_pkthdr.ether_vtag);
2421		if (tso_segsz != 0) {
2422			/*
2423			 * XXX
2424			 * The following indicates the descriptor element
2425			 * is a 32bit quantity.
2426			 */
2427			desc64->length |= htole16((uint16_t)tso_segsz);
2428			desc64->flags |= htole16(tso_segsz >> 16);
2429		}
2430		/*
2431		 * finally, set the valid/checksum/TSO bit in the first
2432		 * descriptor.
2433		 */
2434		desc64->flags |= htole16(NFE_TX_VALID | cflags);
2435	} else {
2436		if (sc->nfe_flags & NFE_JUMBO_SUP)
2437			desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2438		else
2439			desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2440		desc32 = &sc->txq.desc32[si];
2441		if (tso_segsz != 0) {
2442			/*
2443			 * XXX
2444			 * The following indicates the descriptor element
2445			 * is a 32bit quantity.
2446			 */
2447			desc32->length |= htole16((uint16_t)tso_segsz);
2448			desc32->flags |= htole16(tso_segsz >> 16);
2449		}
2450		/*
2451		 * finally, set the valid/checksum/TSO bit in the first
2452		 * descriptor.
2453		 */
2454		desc32->flags |= htole16(NFE_TX_VALID | cflags);
2455	}
2456
2457	sc->txq.cur = prod;
2458	prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2459	sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2460	sc->txq.data[prod].tx_data_map = map;
2461	sc->txq.data[prod].m = m;
2462
2463	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
2464
2465	return (0);
2466}
2467
2468
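/*
 * Program the Rx filter.  The hardware appears to match multicast traffic
 * against a single address/mask pair rather than a hash table: 'addr' ends
 * up holding the bits that are set in every subscribed group address and
 * 'mask' the bit positions that are identical across all of them, so one
 * filter covers the whole list at the cost of also accepting some
 * unsubscribed groups.  For illustration, the groups 01:00:5e:00:00:01 and
 * 01:00:5e:00:00:03 would yield addr 01:00:5e:00:00:01 and mask
 * ff:ff:ff:ff:ff:fd.  ALLMULTI/PROMISC clears both, and the multicast bit
 * is forced on in addr before the registers are written.
 */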
2469static void
2470nfe_setmulti(struct nfe_softc *sc)
2471{
2472	struct ifnet *ifp = sc->nfe_ifp;
2473	struct ifmultiaddr *ifma;
2474	int i;
2475	uint32_t filter;
2476	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
2477	uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2478		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2479	};
2480
2481	NFE_LOCK_ASSERT(sc);
2482
2483	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2484		bzero(addr, ETHER_ADDR_LEN);
2485		bzero(mask, ETHER_ADDR_LEN);
2486		goto done;
2487	}
2488
2489	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
2490	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
2491
2492	if_maddr_rlock(ifp);
2493	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2494		u_char *addrp;
2495
2496		if (ifma->ifma_addr->sa_family != AF_LINK)
2497			continue;
2498
2499		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2500		for (i = 0; i < ETHER_ADDR_LEN; i++) {
2501			u_int8_t mcaddr = addrp[i];
2502			addr[i] &= mcaddr;
2503			mask[i] &= ~mcaddr;
2504		}
2505	}
2506	if_maddr_runlock(ifp);
2507
2508	for (i = 0; i < ETHER_ADDR_LEN; i++) {
2509		mask[i] |= addr[i];
2510	}
2511
2512done:
2513	addr[0] |= 0x01;	/* make sure multicast bit is set */
2514
2515	NFE_WRITE(sc, NFE_MULTIADDR_HI,
2516	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2517	NFE_WRITE(sc, NFE_MULTIADDR_LO,
2518	    addr[5] <<  8 | addr[4]);
2519	NFE_WRITE(sc, NFE_MULTIMASK_HI,
2520	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
2521	NFE_WRITE(sc, NFE_MULTIMASK_LO,
2522	    mask[5] <<  8 | mask[4]);
2523
2524	filter = NFE_READ(sc, NFE_RXFILTER);
2525	filter &= NFE_PFF_RX_PAUSE;
2526	filter |= NFE_RXFILTER_MAGIC;
2527	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2528	NFE_WRITE(sc, NFE_RXFILTER, filter);
2529}
2530
2531
2532static void
2533nfe_tx_task(void *arg, int pending)
2534{
2535	struct ifnet *ifp;
2536
2537	ifp = (struct ifnet *)arg;
2538	nfe_start(ifp);
2539}
2540
2541
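/*
 * if_start handler (also run via the nfe_tx_task taskqueue).  Dequeue
 * frames and hand them to nfe_encap() until the send queue is empty or the
 * Tx ring fills up; on a full ring the frame is put back at the head of the
 * queue and OACTIVE is set.  If anything was queued, sync the descriptor
 * ring, kick the Tx engine once with NFE_RXTX_KICKTX and arm the watchdog.
 */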
2542static void
2543nfe_start(struct ifnet *ifp)
2544{
2545	struct nfe_softc *sc = ifp->if_softc;
2546	struct mbuf *m0;
2547	int enq;
2548
2549	NFE_LOCK(sc);
2550
2551	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2552	    IFF_DRV_RUNNING || sc->nfe_link == 0) {
2553		NFE_UNLOCK(sc);
2554		return;
2555	}
2556
2557	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
2558		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2559		if (m0 == NULL)
2560			break;
2561
2562		if (nfe_encap(sc, &m0) != 0) {
2563			if (m0 == NULL)
2564				break;
2565			IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2566			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2567			break;
2568		}
2569		enq++;
2570		ETHER_BPF_MTAP(ifp, m0);
2571	}
2572
2573	if (enq > 0) {
2574		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2575		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2576
2577		/* kick Tx */
2578		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2579
2580		/*
2581		 * Set a timeout in case the chip goes out to lunch.
2582		 */
2583		sc->nfe_watchdog_timer = 5;
2584	}
2585
2586	NFE_UNLOCK(sc);
2587}
2588
2589
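/*
 * Watchdog, driven from nfe_tick().  Recovery is attempted in stages: first
 * assume a lost Tx completion interrupt and reclaim the ring directly; if
 * descriptors are still outstanding, re-issue the Tx kick up to three times
 * (sc->nfe_force_tx) in case only the start command was lost; only then
 * give up and fully reinitialize the interface.
 */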
2590static void
2591nfe_watchdog(struct ifnet *ifp)
2592{
2593	struct nfe_softc *sc = ifp->if_softc;
2594
2595	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2596		return;
2597
2598	/* Check if we've lost Tx completion interrupt. */
2599	nfe_txeof(sc);
2600	if (sc->txq.queued == 0) {
2601		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2602		    "-- recovering\n");
2603		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2604			taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
2605		return;
2606	}
2607	/* Check if we've lost start Tx command. */
2608	sc->nfe_force_tx++;
2609	if (sc->nfe_force_tx <= 3) {
2610		/*
2611		 * If this is the case for watchdog timeout, the following
2612		 * code should go to nfe_txeof().
2613		 */
2614		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2615		return;
2616	}
2617	sc->nfe_force_tx = 0;
2618
2619	if_printf(ifp, "watchdog timeout\n");
2620
2621	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2622	ifp->if_oerrors++;
2623	nfe_init_locked(sc);
2624}
2625
2626
2627static void
2628nfe_init(void *xsc)
2629{
2630	struct nfe_softc *sc = xsc;
2631
2632	NFE_LOCK(sc);
2633	nfe_init_locked(sc);
2634	NFE_UNLOCK(sc);
2635}
2636
2637
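/*
 * Bring the interface up (softc lock held).  Roughly: stop the chip, size
 * sc->nfe_framesize from the MTU and initialize either the standard or the
 * jumbo Rx ring, select the RXTX control magic for the chip generation,
 * program checksum/VLAN offload, ring addresses and sizes, wake the MAC
 * power state, set interrupt moderation and the Rx filter, start the Rx and
 * Tx engines, clear the MIB counters, and finally enable interrupts and
 * restart the MII state machine and the tick callout.
 */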
2638static void
2639nfe_init_locked(void *xsc)
2640{
2641	struct nfe_softc *sc = xsc;
2642	struct ifnet *ifp = sc->nfe_ifp;
2643	struct mii_data *mii;
2644	uint32_t val;
2645	int error;
2646
2647	NFE_LOCK_ASSERT(sc);
2648
2649	mii = device_get_softc(sc->nfe_miibus);
2650
2651	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2652		return;
2653
2654	nfe_stop(ifp);
2655
2656	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
2657
2658	nfe_init_tx_ring(sc, &sc->txq);
2659	if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2660		error = nfe_init_jrx_ring(sc, &sc->jrxq);
2661	else
2662		error = nfe_init_rx_ring(sc, &sc->rxq);
2663	if (error != 0) {
2664		device_printf(sc->nfe_dev,
2665		    "initialization failed: no memory for rx buffers\n");
2666		nfe_stop(ifp);
2667		return;
2668	}
2669
2670	val = 0;
2671	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2672		val |= NFE_MAC_ADDR_INORDER;
2673	NFE_WRITE(sc, NFE_TX_UNK, val);
2674	NFE_WRITE(sc, NFE_STATUS, 0);
2675
2676	if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2677		NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2678
2679	sc->rxtxctl = NFE_RXTX_BIT2;
2680	if (sc->nfe_flags & NFE_40BIT_ADDR)
2681		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2682	else if (sc->nfe_flags & NFE_JUMBO_SUP)
2683		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2684
2685	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2686		sc->rxtxctl |= NFE_RXTX_RXCSUM;
2687	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2688		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2689
2690	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2691	DELAY(10);
2692	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2693
2694	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2695		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2696	else
2697		NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2698
2699	NFE_WRITE(sc, NFE_SETUP_R6, 0);
2700
2701	/* set MAC address */
2702	nfe_set_macaddr(sc, IF_LLADDR(ifp));
2703
2704	/* tell MAC where rings are in memory */
2705	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2706		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2707		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
2708		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2709		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
2710	} else {
2711		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2712		    NFE_ADDR_HI(sc->rxq.physaddr));
2713		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2714		    NFE_ADDR_LO(sc->rxq.physaddr));
2715	}
2716	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2717	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
2718
2719	NFE_WRITE(sc, NFE_RING_SIZE,
2720	    (NFE_RX_RING_COUNT - 1) << 16 |
2721	    (NFE_TX_RING_COUNT - 1));
2722
2723	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2724
2725	/* force MAC to wakeup */
2726	val = NFE_READ(sc, NFE_PWR_STATE);
2727	if ((val & NFE_PWR_WAKEUP) == 0)
2728		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2729	DELAY(10);
2730	val = NFE_READ(sc, NFE_PWR_STATE);
2731	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2732
2733#if 1
2734	/* configure interrupts coalescing/mitigation */
2735	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2736#else
2737	/* no interrupt mitigation: one interrupt per packet */
2738	NFE_WRITE(sc, NFE_IMTIMER, 970);
2739#endif
2740
2741	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2742	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2743	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2744
2745	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
2746	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2747
2748	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2749	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
2750
2751	sc->rxtxctl &= ~NFE_RXTX_BIT2;
2752	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2753	DELAY(10);
2754	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2755
2756	/* set Rx filter */
2757	nfe_setmulti(sc);
2758
2759	/* enable Rx */
2760	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2761
2762	/* enable Tx */
2763	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2764
2765	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2766
2767	/* Clear hardware stats. */
2768	nfe_stats_clear(sc);
2769
2770#ifdef DEVICE_POLLING
2771	if (ifp->if_capenable & IFCAP_POLLING)
2772		nfe_disable_intr(sc);
2773	else
2774#endif
2775	nfe_set_intr(sc);
2776	nfe_enable_intr(sc); /* enable interrupts */
2777
2778	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2779	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2780
2781	sc->nfe_link = 0;
2782	mii_mediachg(mii);
2783
2784	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2785}
2786
2787
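/*
 * Stop the interface (softc lock held): disarm the watchdog and the tick
 * callout, halt the Tx and Rx engines, mask interrupts, then unload the DMA
 * maps and free any mbufs still sitting in the Rx, jumbo Rx and Tx rings
 * before harvesting the final MIB counter values.
 */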
2788static void
2789nfe_stop(struct ifnet *ifp)
2790{
2791	struct nfe_softc *sc = ifp->if_softc;
2792	struct nfe_rx_ring *rx_ring;
2793	struct nfe_jrx_ring *jrx_ring;
2794	struct nfe_tx_ring *tx_ring;
2795	struct nfe_rx_data *rdata;
2796	struct nfe_tx_data *tdata;
2797	int i;
2798
2799	NFE_LOCK_ASSERT(sc);
2800
2801	sc->nfe_watchdog_timer = 0;
2802	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2803
2804	callout_stop(&sc->nfe_stat_ch);
2805
2806	/* abort Tx */
2807	NFE_WRITE(sc, NFE_TX_CTL, 0);
2808
2809	/* disable Rx */
2810	NFE_WRITE(sc, NFE_RX_CTL, 0);
2811
2812	/* disable interrupts */
2813	nfe_disable_intr(sc);
2814
2815	sc->nfe_link = 0;
2816
2817	/* free Rx and Tx mbufs still in the queues. */
2818	rx_ring = &sc->rxq;
2819	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2820		rdata = &rx_ring->data[i];
2821		if (rdata->m != NULL) {
2822			bus_dmamap_sync(rx_ring->rx_data_tag,
2823			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2824			bus_dmamap_unload(rx_ring->rx_data_tag,
2825			    rdata->rx_data_map);
2826			m_freem(rdata->m);
2827			rdata->m = NULL;
2828		}
2829	}
2830
2831	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2832		jrx_ring = &sc->jrxq;
2833		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2834			rdata = &jrx_ring->jdata[i];
2835			if (rdata->m != NULL) {
2836				bus_dmamap_sync(jrx_ring->jrx_data_tag,
2837				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2838				bus_dmamap_unload(jrx_ring->jrx_data_tag,
2839				    rdata->rx_data_map);
2840				m_freem(rdata->m);
2841				rdata->m = NULL;
2842			}
2843		}
2844	}
2845
2846	tx_ring = &sc->txq;
2847	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
2848		tdata = &tx_ring->data[i];
2849		if (tdata->m != NULL) {
2850			bus_dmamap_sync(tx_ring->tx_data_tag,
2851			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2852			bus_dmamap_unload(tx_ring->tx_data_tag,
2853			    tdata->tx_data_map);
2854			m_freem(tdata->m);
2855			tdata->m = NULL;
2856		}
2857	}
2858	/* Update hardware stats. */
2859	nfe_stats_update(sc);
2860}
2861
2862
2863static int
2864nfe_ifmedia_upd(struct ifnet *ifp)
2865{
2866	struct nfe_softc *sc = ifp->if_softc;
2867	struct mii_data *mii;
2868
2869	NFE_LOCK(sc);
2870	mii = device_get_softc(sc->nfe_miibus);
2871	mii_mediachg(mii);
2872	NFE_UNLOCK(sc);
2873
2874	return (0);
2875}
2876
2877
2878static void
2879nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2880{
2881	struct nfe_softc *sc;
2882	struct mii_data *mii;
2883
2884	sc = ifp->if_softc;
2885
2886	NFE_LOCK(sc);
2887	mii = device_get_softc(sc->nfe_miibus);
2888	mii_pollstat(mii);
2889	NFE_UNLOCK(sc);
2890
2891	ifmr->ifm_active = mii->mii_media_active;
2892	ifmr->ifm_status = mii->mii_media_status;
2893}
2894
2895
2896void
2897nfe_tick(void *xsc)
2898{
2899	struct nfe_softc *sc;
2900	struct mii_data *mii;
2901	struct ifnet *ifp;
2902
2903	sc = (struct nfe_softc *)xsc;
2904
2905	NFE_LOCK_ASSERT(sc);
2906
2907	ifp = sc->nfe_ifp;
2908
2909	mii = device_get_softc(sc->nfe_miibus);
2910	mii_tick(mii);
2911	nfe_stats_update(sc);
2912	nfe_watchdog(ifp);
2913	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2914}
2915
2916
2917static int
2918nfe_shutdown(device_t dev)
2919{
2920	struct nfe_softc *sc;
2921	struct ifnet *ifp;
2922
2923	sc = device_get_softc(dev);
2924
2925	NFE_LOCK(sc);
2926	ifp = sc->nfe_ifp;
2927	nfe_stop(ifp);
2928	/* nfe_reset(sc); */
2929	NFE_UNLOCK(sc);
2930
2931	return (0);
2932}
2933
2934
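/*
 * Read the station address from the MACADDR registers.  Older chips appear
 * to store it byte-reversed, which the first branch undoes; parts flagged
 * NFE_CORRECT_MACADDR already hold it in the order nfe_set_macaddr() writes
 * it (and are configured accordingly at init time via NFE_MAC_ADDR_INORDER).
 */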
2935static void
2936nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2937{
2938	uint32_t val;
2939
2940	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
2941		val = NFE_READ(sc, NFE_MACADDR_LO);
2942		addr[0] = (val >> 8) & 0xff;
2943		addr[1] = (val & 0xff);
2944
2945		val = NFE_READ(sc, NFE_MACADDR_HI);
2946		addr[2] = (val >> 24) & 0xff;
2947		addr[3] = (val >> 16) & 0xff;
2948		addr[4] = (val >>  8) & 0xff;
2949		addr[5] = (val & 0xff);
2950	} else {
2951		val = NFE_READ(sc, NFE_MACADDR_LO);
2952		addr[5] = (val >> 8) & 0xff;
2953		addr[4] = (val & 0xff);
2954
2955		val = NFE_READ(sc, NFE_MACADDR_HI);
2956		addr[3] = (val >> 24) & 0xff;
2957		addr[2] = (val >> 16) & 0xff;
2958		addr[1] = (val >>  8) & 0xff;
2959		addr[0] = (val & 0xff);
2960	}
2961}
2962
2963
2964static void
2965nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
2966{
2967
2968	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
2969	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2970	    addr[1] << 8 | addr[0]);
2971}
2972
2973
2974/*
2975 * Map a single buffer address.
2976 */
2977
2978static void
2979nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2980{
2981	struct nfe_dmamap_arg *ctx;
2982
2983	if (error != 0)
2984		return;
2985
2986	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2987
2988	ctx = (struct nfe_dmamap_arg *)arg;
2989	ctx->nfe_busaddr = segs[0].ds_addr;
2990}
2991
2992
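/*
 * Generic handler for integer sysctls that must stay within [low, high];
 * sysctl_hw_nfe_proc_limit() below uses it to clamp the per-device
 * process_limit sysctl between NFE_PROC_MIN and NFE_PROC_MAX.
 */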
2993static int
2994sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2995{
2996	int error, value;
2997
2998	if (!arg1)
2999		return (EINVAL);
3000	value = *(int *)arg1;
3001	error = sysctl_handle_int(oidp, &value, 0, req);
3002	if (error || !req->newptr)
3003		return (error);
3004	if (value < low || value > high)
3005		return (EINVAL);
3006	*(int *)arg1 = value;
3007
3008	return (0);
3009}
3010
3011
3012static int
3013sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3014{
3015
3016	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
3017	    NFE_PROC_MAX));
3018}
3019
3020
3021#define	NFE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
3022	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
3023#define	NFE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
3024	    SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
3025
3026static void
3027nfe_sysctl_node(struct nfe_softc *sc)
3028{
3029	struct sysctl_ctx_list *ctx;
3030	struct sysctl_oid_list *child, *parent;
3031	struct sysctl_oid *tree;
3032	struct nfe_hw_stats *stats;
3033	int error;
3034
3035	stats = &sc->nfe_stats;
3036	ctx = device_get_sysctl_ctx(sc->nfe_dev);
3037	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
3038	SYSCTL_ADD_PROC(ctx, child,
3039	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
3040	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
3041	    "max number of Rx events to process");
3042
3043	sc->nfe_process_limit = NFE_PROC_DEFAULT;
3044	error = resource_int_value(device_get_name(sc->nfe_dev),
3045	    device_get_unit(sc->nfe_dev), "process_limit",
3046	    &sc->nfe_process_limit);
3047	if (error == 0) {
3048		if (sc->nfe_process_limit < NFE_PROC_MIN ||
3049		    sc->nfe_process_limit > NFE_PROC_MAX) {
3050			device_printf(sc->nfe_dev,
3051			    "process_limit value out of range; "
3052			    "using default: %d\n", NFE_PROC_DEFAULT);
3053			sc->nfe_process_limit = NFE_PROC_DEFAULT;
3054		}
3055	}
3056
3057	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3058		return;
3059
3060	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
3061	    NULL, "NFE statistics");
3062	parent = SYSCTL_CHILDREN(tree);
3063
3064	/* Rx statistics. */
3065	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
3066	    NULL, "Rx MAC statistics");
3067	child = SYSCTL_CHILDREN(tree);
3068
3069	NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
3070	    &stats->rx_frame_errors, "Framing Errors");
3071	NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
3072	    &stats->rx_extra_bytes, "Extra Bytes");
3073	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3074	    &stats->rx_late_cols, "Late Collisions");
3075	NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
3076	    &stats->rx_runts, "Runts");
3077	NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
3078	    &stats->rx_jumbos, "Jumbos");
3079	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
3080	    &stats->rx_fifo_overuns, "FIFO Overruns");
3081	NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
3082	    &stats->rx_crc_errors, "CRC Errors");
3083	NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
3084	    &stats->rx_fae, "Frame Alignment Errors");
3085	NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
3086	    &stats->rx_len_errors, "Length Errors");
3087	NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3088	    &stats->rx_unicast, "Unicast Frames");
3089	NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3090	    &stats->rx_multicast, "Multicast Frames");
3091	NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3092	    &stats->rx_broadcast, "Broadcast Frames");
3093	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3094		NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3095		    &stats->rx_octets, "Octets");
3096		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3097		    &stats->rx_pause, "Pause frames");
3098		NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
3099		    &stats->rx_drops, "Drop frames");
3100	}
3101
3102	/* Tx statistics. */
3103	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
3104	    NULL, "Tx MAC statistics");
3105	child = SYSCTL_CHILDREN(tree);
3106	NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3107	    &stats->tx_octets, "Octets");
3108	NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
3109	    &stats->tx_zero_rexmits, "Zero Retransmits");
3110	NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
3111	    &stats->tx_one_rexmits, "One Retransmits");
3112	NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
3113	    &stats->tx_multi_rexmits, "Multiple Retransmits");
3114	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3115	    &stats->tx_late_cols, "Late Collisions");
3116	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
3117	    &stats->tx_fifo_underuns, "FIFO Underruns");
3118	NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
3119	    &stats->tx_carrier_losts, "Carrier Losts");
3120	    &stats->tx_carrier_losts, "Carrier Losses");
3121	    &stats->tx_excess_deferals, "Excess Deferrals");
3122	NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
3123	    &stats->tx_retry_errors, "Retry Errors");
3124	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3125		NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
3126		    &stats->tx_deferals, "Deferrals");
3127		NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
3128		    &stats->tx_frames, "Frames");
3129		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3130		    &stats->tx_pause, "Pause Frames");
3131	}
3132	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3133		NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3134		    &stats->tx_unicast, "Unicast Frames");
3135		NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3136		    &stats->tx_multicast, "Multicast Frames");
3137		NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3138		    &stats->tx_broadcast, "Broadcast Frames");
3139	}
3140}
3141
3142#undef NFE_SYSCTL_STAT_ADD32
3143#undef NFE_SYSCTL_STAT_ADD64
3144
3145static void
3146nfe_stats_clear(struct nfe_softc *sc)
3147{
3148	int i, mib_cnt;
3149
3150	if ((sc->nfe_flags & NFE_MIB_V1) != 0)
3151		mib_cnt = NFE_NUM_MIB_STATV1;
3152	else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
3153		mib_cnt = NFE_NUM_MIB_STATV2;
3154	else
3155		return;
3156
3157	for (i = 0; i < mib_cnt; i += sizeof(uint32_t))
3158		NFE_READ(sc, NFE_TX_OCTET + i);
3159
3160	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3161		NFE_READ(sc, NFE_TX_UNICAST);
3162		NFE_READ(sc, NFE_TX_MULTICAST);
3163		NFE_READ(sc, NFE_TX_BROADCAST);
3164	}
3165}
3166
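/*
 * Accumulate the hardware MIB counters into the soft copy in the softc.
 * The counters appear to be clear-on-read, which is also why
 * nfe_stats_clear() above simply reads and discards every register.
 */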
3167static void
3168nfe_stats_update(struct nfe_softc *sc)
3169{
3170	struct nfe_hw_stats *stats;
3171
3172	NFE_LOCK_ASSERT(sc);
3173
3174	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3175		return;
3176
3177	stats = &sc->nfe_stats;
3178	stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
3179	stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
3180	stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
3181	stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
3182	stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
3183	stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
3184	stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
3185	stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
3186	stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
3187	stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
3188	stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
3189	stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
3190	stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
3191	stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
3192	stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
3193	stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
3194	stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
3195	stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
3196	stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
3197	stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
3198	stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
3199
3200	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3201		stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
3202		stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
3203		stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
3204		stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
3205		stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
3206		stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
3207	}
3208
3209	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3210		stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
3211		stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
3212		stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
3213	}
3214}
3215