if_nfe.c revision 170589
1/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/
2
3/*-
4 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
22
23#include <sys/cdefs.h>
24__FBSDID("$FreeBSD: head/sys/dev/nfe/if_nfe.c 170589 2007-06-12 02:16:02Z yongari $");
25
26#ifdef HAVE_KERNEL_OPTION_HEADERS
27#include "opt_device_polling.h"
28#endif
29
30#include <sys/param.h>
31#include <sys/endian.h>
32#include <sys/systm.h>
33#include <sys/sockio.h>
34#include <sys/mbuf.h>
35#include <sys/malloc.h>
36#include <sys/module.h>
37#include <sys/kernel.h>
38#include <sys/queue.h>
39#include <sys/socket.h>
40#include <sys/sysctl.h>
41#include <sys/taskqueue.h>
42
43#include <net/if.h>
44#include <net/if_arp.h>
45#include <net/ethernet.h>
46#include <net/if_dl.h>
47#include <net/if_media.h>
48#include <net/if_types.h>
49#include <net/if_vlan_var.h>
50
51#include <net/bpf.h>
52
53#include <machine/bus.h>
54#include <machine/resource.h>
55#include <sys/bus.h>
56#include <sys/rman.h>
57
58#include <dev/mii/mii.h>
59#include <dev/mii/miivar.h>
60
61#include <dev/pci/pcireg.h>
62#include <dev/pci/pcivar.h>
63
64#include <dev/nfe/if_nfereg.h>
65#include <dev/nfe/if_nfevar.h>
66
67MODULE_DEPEND(nfe, pci, 1, 1, 1);
68MODULE_DEPEND(nfe, ether, 1, 1, 1);
69MODULE_DEPEND(nfe, miibus, 1, 1, 1);
70
71/* "device miibus" required.  See GENERIC if you get errors here. */
72#include "miibus_if.h"
73
74static int  nfe_probe(device_t);
75static int  nfe_attach(device_t);
76static int  nfe_detach(device_t);
77static int  nfe_suspend(device_t);
78static int  nfe_resume(device_t);
79static void nfe_shutdown(device_t);
80static void nfe_power(struct nfe_softc *);
81static int  nfe_miibus_readreg(device_t, int, int);
82static int  nfe_miibus_writereg(device_t, int, int, int);
83static void nfe_miibus_statchg(device_t);
84static void nfe_link_task(void *, int);
85static void nfe_set_intr(struct nfe_softc *);
86static __inline void nfe_enable_intr(struct nfe_softc *);
87static __inline void nfe_disable_intr(struct nfe_softc *);
88static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
89static void nfe_alloc_msix(struct nfe_softc *, int);
90static int nfe_intr(void *);
91static void nfe_int_task(void *, int);
92static void *nfe_jalloc(struct nfe_softc *);
93static void nfe_jfree(void *, void *);
94static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
95static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
96static int nfe_newbuf(struct nfe_softc *, int);
97static int nfe_jnewbuf(struct nfe_softc *, int);
98static int  nfe_rxeof(struct nfe_softc *, int);
99static int  nfe_jrxeof(struct nfe_softc *, int);
100static void nfe_txeof(struct nfe_softc *);
101static struct mbuf *nfe_defrag(struct mbuf *, int, int);
102static int  nfe_encap(struct nfe_softc *, struct mbuf **);
103static void nfe_setmulti(struct nfe_softc *);
104static void nfe_tx_task(void *, int);
105static void nfe_start(struct ifnet *);
106static void nfe_watchdog(struct ifnet *);
107static void nfe_init(void *);
108static void nfe_init_locked(void *);
109static void nfe_stop(struct ifnet *);
110static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
111static int  nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
112static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
113static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
114static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
115static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
116static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
117static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
118static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
119static int  nfe_ifmedia_upd(struct ifnet *);
120static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
121static void nfe_tick(void *);
122static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
123static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
124static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
125
126static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
127static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
128
129#ifdef NFE_DEBUG
130static int nfedebug = 0;
131#define	DPRINTF(sc, ...)	do {				\
132	if (nfedebug)						\
133		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
134} while (0)
135#define	DPRINTFN(sc, n, ...)	do {				\
136	if (nfedebug >= (n))					\
137		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
138} while (0)
139#else
140#define	DPRINTF(sc, ...)
141#define	DPRINTFN(sc, n, ...)
142#endif
143
144#define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
145#define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
146#define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
147
148#define	NFE_JLIST_LOCK(_sc)	mtx_lock(&(_sc)->nfe_jlist_mtx)
149#define	NFE_JLIST_UNLOCK(_sc)	mtx_unlock(&(_sc)->nfe_jlist_mtx)
150
151/* Tunables. */
152static int msi_disable = 0;
153static int msix_disable = 0;
154TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
155TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
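/*
 * Both knobs are boot-time tunables; for example, adding
 * hw.nfe.msi_disable="1" to /boot/loader.conf makes the driver fall back
 * to a legacy INTx interrupt.
 */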
156
157static device_method_t nfe_methods[] = {
158	/* Device interface */
159	DEVMETHOD(device_probe,		nfe_probe),
160	DEVMETHOD(device_attach,	nfe_attach),
161	DEVMETHOD(device_detach,	nfe_detach),
162	DEVMETHOD(device_suspend,	nfe_suspend),
163	DEVMETHOD(device_resume,	nfe_resume),
164	DEVMETHOD(device_shutdown,	nfe_shutdown),
165
166	/* bus interface */
167	DEVMETHOD(bus_print_child,	bus_generic_print_child),
168	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
169
170	/* MII interface */
171	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
172	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
173	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),
174
175	{ NULL, NULL }
176};
177
178static driver_t nfe_driver = {
179	"nfe",
180	nfe_methods,
181	sizeof(struct nfe_softc)
182};
183
184static devclass_t nfe_devclass;
185
186DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
187DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
188
189static struct nfe_type nfe_devs[] = {
190	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
191	    "NVIDIA nForce MCP Networking Adapter"},
192	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
193	    "NVIDIA nForce2 MCP2 Networking Adapter"},
194	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
195	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
196	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
197	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
198	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
199	    "NVIDIA nForce3 MCP3 Networking Adapter"},
200	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
201	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
202	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
203	    "NVIDIA nForce3 MCP7 Networking Adapter"},
204	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
205	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
206	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
207	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
208	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
209	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
210	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
211	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
212	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
213	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
214	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
215	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
216	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
217	    "NVIDIA nForce MCP55 Networking Adapter"},
218	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
219	    "NVIDIA nForce MCP55 Networking Adapter"},
220	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
221	    "NVIDIA nForce MCP61 Networking Adapter"},
222	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
223	    "NVIDIA nForce MCP61 Networking Adapter"},
224	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
225	    "NVIDIA nForce MCP61 Networking Adapter"},
226	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
227	    "NVIDIA nForce MCP61 Networking Adapter"},
228	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
229	    "NVIDIA nForce MCP65 Networking Adapter"},
230	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
231	    "NVIDIA nForce MCP65 Networking Adapter"},
232	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
233	    "NVIDIA nForce MCP65 Networking Adapter"},
234	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
235	    "NVIDIA nForce MCP65 Networking Adapter"},
236	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
237	    "NVIDIA nForce MCP67 Networking Adapter"},
238	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
239	    "NVIDIA nForce MCP67 Networking Adapter"},
240	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
241	    "NVIDIA nForce MCP67 Networking Adapter"},
242	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
243	    "NVIDIA nForce MCP67 Networking Adapter"},
244	{0, 0, NULL}
245};
246
247
248/* Probe for supported hardware IDs */
249static int
250nfe_probe(device_t dev)
251{
252	struct nfe_type *t;
253
254	t = nfe_devs;
255	/* Check for matching PCI device IDs */
256	while (t->name != NULL) {
257		if ((pci_get_vendor(dev) == t->vid_id) &&
258		    (pci_get_device(dev) == t->dev_id)) {
259			device_set_desc(dev, t->name);
260			return (BUS_PROBE_DEFAULT);
261		}
262		t++;
263	}
264
265	return (ENXIO);
266}
267
268static void
269nfe_alloc_msix(struct nfe_softc *sc, int count)
270{
271	int rid;
272
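	/*
	 * On these controllers the MSI-X table is mapped through BAR(2) and
	 * the pending-bit array (PBA) through BAR(3); both resources must be
	 * active before pci_alloc_msix() is attempted.
	 */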
273	rid = PCIR_BAR(2);
274	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
275	    &rid, RF_ACTIVE);
276	if (sc->nfe_msix_res == NULL) {
277		device_printf(sc->nfe_dev,
278		    "couldn't allocate MSIX table resource\n");
279		return;
280	}
281	rid = PCIR_BAR(3);
282	sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
283	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
284	if (sc->nfe_msix_pba_res == NULL) {
285		device_printf(sc->nfe_dev,
286		    "couldn't allocate MSIX PBA resource\n");
287		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
288		    sc->nfe_msix_res);
289		sc->nfe_msix_res = NULL;
290		return;
291	}
292
293	if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
294		if (count == NFE_MSI_MESSAGES) {
295			if (bootverbose)
296				device_printf(sc->nfe_dev,
297				    "Using %d MSIX messages\n", count);
298			sc->nfe_msix = 1;
299		} else {
300			if (bootverbose)
301				device_printf(sc->nfe_dev,
302				    "couldn't allocate MSIX\n");
303			pci_release_msi(sc->nfe_dev);
304			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
305			    PCIR_BAR(3), sc->nfe_msix_pba_res);
306			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
307			    PCIR_BAR(2), sc->nfe_msix_res);
308			sc->nfe_msix_pba_res = NULL;
309			sc->nfe_msix_res = NULL;
310		}
311	}
312}
313
314static int
315nfe_attach(device_t dev)
316{
317	struct nfe_softc *sc;
318	struct ifnet *ifp;
319	bus_addr_t dma_addr_max;
320	int error = 0, i, msic, reg, rid;
321
322	sc = device_get_softc(dev);
323	sc->nfe_dev = dev;
324
325	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
326	    MTX_DEF);
327	mtx_init(&sc->nfe_jlist_mtx, "nfe_jlist_mtx", NULL, MTX_DEF);
328	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
329	TASK_INIT(&sc->nfe_link_task, 0, nfe_link_task, sc);
330	SLIST_INIT(&sc->nfe_jfree_listhead);
331	SLIST_INIT(&sc->nfe_jinuse_listhead);
332
333	pci_enable_busmaster(dev);
334
335	rid = PCIR_BAR(0);
336	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
337	    RF_ACTIVE);
338	if (sc->nfe_res[0] == NULL) {
339		device_printf(dev, "couldn't map memory resources\n");
340		mtx_destroy(&sc->nfe_mtx);
341		return (ENXIO);
342	}
343
344	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
345		uint16_t v, width;
346
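		/*
		 * The offsets below are relative to the PCI Express
		 * capability: 0x08 is Device Control (bits 14:12 encode the
		 * maximum read request size, 5 -> 4096 bytes), 0x0c is Link
		 * Capabilities and 0x12 is Link Status.
		 */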
347		v = pci_read_config(dev, reg + 0x08, 2);
348		/* Change max. read request size to 4096. */
349		v &= ~(7 << 12);
350		v |= (5 << 12);
351		pci_write_config(dev, reg + 0x08, v, 2);
352
353		v = pci_read_config(dev, reg + 0x0c, 2);
354		/* link capability */
355		v = (v >> 4) & 0x0f;
356		width = pci_read_config(dev, reg + 0x12, 2);
357		/* negotiated link width */
358		width = (width >> 4) & 0x3f;
359		if (v != width)
360			device_printf(sc->nfe_dev,
361			    "warning, negotiated link width (x%d) != "
362			    "maximum link width (x%d)\n", width, v);
363	}
364
365	/* Allocate interrupt */
366	if (msix_disable == 0 || msi_disable == 0) {
367		if (msix_disable == 0 &&
368		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
369			nfe_alloc_msix(sc, msic);
370		if (msi_disable == 0 && sc->nfe_msix == 0 &&
371		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
372		    pci_alloc_msi(dev, &msic) == 0) {
373			if (msic == NFE_MSI_MESSAGES) {
374				if (bootverbose)
375					device_printf(dev,
376					    "Using %d MSI messages\n", msic);
377				sc->nfe_msi = 1;
378			} else
379				pci_release_msi(dev);
380		}
381	}
382
383	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
384		rid = 0;
385		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
386		    RF_SHAREABLE | RF_ACTIVE);
387		if (sc->nfe_irq[0] == NULL) {
388			device_printf(dev, "couldn't allocate IRQ resources\n");
389			error = ENXIO;
390			goto fail;
391		}
392	} else {
393		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
394			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
395			    SYS_RES_IRQ, &rid, RF_ACTIVE);
396			if (sc->nfe_irq[i] == NULL) {
397				device_printf(dev,
398				    "couldn't allocate IRQ resources for "
399				    "message %d\n", rid);
400				error = ENXIO;
401				goto fail;
402			}
403		}
404		/* Map interrupts to vector 0. */
405		if (sc->nfe_msix != 0) {
406			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
407			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
408		} else if (sc->nfe_msi != 0) {
409			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
410			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
411		}
412	}
413
414	/* Set IRQ status/mask register. */
415	sc->nfe_irq_status = NFE_IRQ_STATUS;
416	sc->nfe_irq_mask = NFE_IRQ_MASK;
417	sc->nfe_intrs = NFE_IRQ_WANTED;
418	sc->nfe_nointrs = 0;
419	if (sc->nfe_msix != 0) {
420		sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
421		sc->nfe_nointrs = NFE_IRQ_WANTED;
422	} else if (sc->nfe_msi != 0) {
423		sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
424		sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
425	}
426
427	sc->nfe_devid = pci_get_device(dev);
428	sc->nfe_revid = pci_get_revid(dev);
429	sc->nfe_flags = 0;
430
431	switch (sc->nfe_devid) {
432	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
433	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
434	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
435	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
436		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
437		break;
438	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
439	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
440		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
441		break;
442	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
443	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
444	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
445	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
446		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
447		break;
448	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
449	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
450		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
451		    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL;
452		break;
453
454	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
455	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
456	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
457	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
458	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
459	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
460	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
461	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
462		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
463		    NFE_TX_FLOW_CTRL;
464		break;
465	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
466	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
467	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
468	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
469		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
470		    NFE_PWR_MGMT | NFE_TX_FLOW_CTRL;
471		break;
472	}
473
474	nfe_power(sc);
475	/* Check for reversed ethernet address */
476	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
477		sc->nfe_flags |= NFE_CORRECT_MACADDR;
478	nfe_get_macaddr(sc, sc->eaddr);
479	/*
480	 * Allocate the parent bus DMA tag appropriate for PCI.
481	 */
482	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
483	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
484		dma_addr_max = NFE_DMA_MAXADDR;
485	error = bus_dma_tag_create(
486	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
487	    1, 0,				/* alignment, boundary */
488	    dma_addr_max,			/* lowaddr */
489	    BUS_SPACE_MAXADDR,			/* highaddr */
490	    NULL, NULL,				/* filter, filterarg */
491	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
492	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
493	    0,					/* flags */
494	    NULL, NULL,				/* lockfunc, lockarg */
495	    &sc->nfe_parent_tag);
496	if (error)
497		goto fail;
498
499	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
500	if (ifp == NULL) {
501		device_printf(dev, "can not if_alloc()\n");
502		error = ENOSPC;
503		goto fail;
504	}
505	TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp);
506
507	/*
508	 * Allocate Tx and Rx rings.
509	 */
510	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
511		goto fail;
512
513	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
514		goto fail;
515
516	if ((error = nfe_alloc_jrx_ring(sc, &sc->jrxq)) != 0)
517		goto fail;
518
519	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
520	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
521	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
522	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
523	    "max number of Rx events to process");
524
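	/*
	 * Default Rx processing limit.  It can be overridden per device with
	 * a hint such as hint.nfe.0.process_limit, or changed at run time
	 * through the dev.nfe.N.process_limit sysctl registered above.
	 */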
525	sc->nfe_process_limit = NFE_PROC_DEFAULT;
526	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
527	    "process_limit", &sc->nfe_process_limit);
528	if (error == 0) {
529		if (sc->nfe_process_limit < NFE_PROC_MIN ||
530		    sc->nfe_process_limit > NFE_PROC_MAX) {
531			device_printf(dev, "process_limit value out of range; "
532			    "using default: %d\n", NFE_PROC_DEFAULT);
533			sc->nfe_process_limit = NFE_PROC_DEFAULT;
534		}
535	}
536
537	ifp->if_softc = sc;
538	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
539	ifp->if_mtu = ETHERMTU;
540	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
541	ifp->if_ioctl = nfe_ioctl;
542	ifp->if_start = nfe_start;
543	ifp->if_hwassist = 0;
544	ifp->if_capabilities = 0;
545	ifp->if_watchdog = NULL;
546	ifp->if_init = nfe_init;
547	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
548	ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
549	IFQ_SET_READY(&ifp->if_snd);
550
551	if (sc->nfe_flags & NFE_HW_CSUM) {
552		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
553		ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
554	}
555	ifp->if_capenable = ifp->if_capabilities;
556
557	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
558	/* VLAN capability setup. */
559	ifp->if_capabilities |= IFCAP_VLAN_MTU;
560	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
561		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
562		if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
563			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
564	}
565	ifp->if_capenable = ifp->if_capabilities;
573
574#ifdef DEVICE_POLLING
575	ifp->if_capabilities |= IFCAP_POLLING;
576#endif
577
578	/* Do MII setup */
579	if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd,
580	    nfe_ifmedia_sts)) {
581		device_printf(dev, "MII without any PHY!\n");
582		error = ENXIO;
583		goto fail;
584	}
585	ether_ifattach(ifp, sc->eaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
586
587	TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
588	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
589	    taskqueue_thread_enqueue, &sc->nfe_tq);
590	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
591	    device_get_nameunit(sc->nfe_dev));
592	error = 0;
593	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
594		error = bus_setup_intr(dev, sc->nfe_irq[0],
595		    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
596		    &sc->nfe_intrhand[0]);
597	} else {
598		for (i = 0; i < NFE_MSI_MESSAGES; i++) {
599			error = bus_setup_intr(dev, sc->nfe_irq[i],
600			    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
601			    &sc->nfe_intrhand[i]);
602			if (error != 0)
603				break;
604		}
605	}
606	if (error) {
607		device_printf(dev, "couldn't set up irq\n");
608		taskqueue_free(sc->nfe_tq);
609		sc->nfe_tq = NULL;
610		ether_ifdetach(ifp);
611		goto fail;
612	}
613
614fail:
615	if (error)
616		nfe_detach(dev);
617
618	return (error);
619}
620
621
622static int
623nfe_detach(device_t dev)
624{
625	struct nfe_softc *sc;
626	struct ifnet *ifp;
627	uint8_t eaddr[ETHER_ADDR_LEN];
628	int i, rid;
629
630	sc = device_get_softc(dev);
631	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
632	ifp = sc->nfe_ifp;
633
634#ifdef DEVICE_POLLING
635	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
636		ether_poll_deregister(ifp);
637#endif
638	if (device_is_attached(dev)) {
639		NFE_LOCK(sc);
640		nfe_stop(ifp);
641		ifp->if_flags &= ~IFF_UP;
642		NFE_UNLOCK(sc);
643		callout_drain(&sc->nfe_stat_ch);
644		taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task);
645		taskqueue_drain(taskqueue_swi, &sc->nfe_link_task);
646		ether_ifdetach(ifp);
647	}
648
649	if (ifp) {
650		/* restore ethernet address */
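		/*
		 * Chips without NFE_CORRECT_MACADDR store the station
		 * address byte-reversed, so swap it back before writing it
		 * to the hardware.
		 */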
651		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
652			for (i = 0; i < ETHER_ADDR_LEN; i++) {
653				eaddr[i] = sc->eaddr[5 - i];
654			}
655		} else
656			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
657		nfe_set_macaddr(sc, eaddr);
658		if_free(ifp);
659	}
660	if (sc->nfe_miibus)
661		device_delete_child(dev, sc->nfe_miibus);
662	bus_generic_detach(dev);
663	if (sc->nfe_tq != NULL) {
664		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
665		taskqueue_free(sc->nfe_tq);
666		sc->nfe_tq = NULL;
667	}
668
669	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
670		if (sc->nfe_intrhand[i] != NULL) {
671			bus_teardown_intr(dev, sc->nfe_irq[i],
672			    sc->nfe_intrhand[i]);
673			sc->nfe_intrhand[i] = NULL;
674		}
675	}
676
677	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
678		if (sc->nfe_irq[0] != NULL)
679			bus_release_resource(dev, SYS_RES_IRQ, 0,
680			    sc->nfe_irq[0]);
681	} else {
682		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
683			if (sc->nfe_irq[i] != NULL) {
684				bus_release_resource(dev, SYS_RES_IRQ, rid,
685				    sc->nfe_irq[i]);
686				sc->nfe_irq[i] = NULL;
687			}
688		}
689		pci_release_msi(dev);
690	}
691	if (sc->nfe_msix_pba_res != NULL) {
692		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
693		    sc->nfe_msix_pba_res);
694		sc->nfe_msix_pba_res = NULL;
695	}
696	if (sc->nfe_msix_res != NULL) {
697		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
698		    sc->nfe_msix_res);
699		sc->nfe_msix_res = NULL;
700	}
701	if (sc->nfe_res[0] != NULL) {
702		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
703		    sc->nfe_res[0]);
704		sc->nfe_res[0] = NULL;
705	}
706
707	nfe_free_tx_ring(sc, &sc->txq);
708	nfe_free_rx_ring(sc, &sc->rxq);
709	nfe_free_jrx_ring(sc, &sc->jrxq);
710
711	if (sc->nfe_parent_tag) {
712		bus_dma_tag_destroy(sc->nfe_parent_tag);
713		sc->nfe_parent_tag = NULL;
714	}
715
716	mtx_destroy(&sc->nfe_jlist_mtx);
717	mtx_destroy(&sc->nfe_mtx);
718
719	return (0);
720}
721
722
723static int
724nfe_suspend(device_t dev)
725{
726	struct nfe_softc *sc;
727
728	sc = device_get_softc(dev);
729
730	NFE_LOCK(sc);
731	nfe_stop(sc->nfe_ifp);
732	sc->nfe_suspended = 1;
733	NFE_UNLOCK(sc);
734
735	return (0);
736}
737
738
739static int
740nfe_resume(device_t dev)
741{
742	struct nfe_softc *sc;
743	struct ifnet *ifp;
744
745	sc = device_get_softc(dev);
746
747	NFE_LOCK(sc);
748	ifp = sc->nfe_ifp;
749	if (ifp->if_flags & IFF_UP)
750		nfe_init_locked(sc);
751	sc->nfe_suspended = 0;
752	NFE_UNLOCK(sc);
753
754	return (0);
755}
756
757
758/* Take PHY/NIC out of powerdown, from Linux */
759static void
760nfe_power(struct nfe_softc *sc)
761{
762	uint32_t pwr;
763
764	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
765		return;
766	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
767	NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
768	DELAY(100);
769	NFE_WRITE(sc, NFE_MAC_RESET, 0);
770	DELAY(100);
771	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
772	pwr = NFE_READ(sc, NFE_PWR2_CTL);
773	pwr &= ~NFE_PWR2_WAKEUP_MASK;
774	if (sc->nfe_revid >= 0xa3 &&
775	    (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
776	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
777		pwr |= NFE_PWR2_REVA3;
778	NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
779}
780
781
782static void
783nfe_miibus_statchg(device_t dev)
784{
785	struct nfe_softc *sc;
786
787	sc = device_get_softc(dev);
788	taskqueue_enqueue(taskqueue_swi, &sc->nfe_link_task);
789}
790
791
792static void
793nfe_link_task(void *arg, int pending)
794{
795	struct nfe_softc *sc;
796	struct mii_data *mii;
797	struct ifnet *ifp;
798	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
799	uint32_t gmask, rxctl, txctl, val;
800
801	sc = (struct nfe_softc *)arg;
802
803	NFE_LOCK(sc);
804
805	mii = device_get_softc(sc->nfe_miibus);
806	ifp = sc->nfe_ifp;
807	if (mii == NULL || ifp == NULL) {
808		NFE_UNLOCK(sc);
809		return;
810	}
811
812	if (mii->mii_media_status & IFM_ACTIVE) {
813		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
814			sc->nfe_link = 1;
815	} else
816		sc->nfe_link = 0;
817
818	phy = NFE_READ(sc, NFE_PHY_IFACE);
819	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
820
821	seed = NFE_READ(sc, NFE_RNDSEED);
822	seed &= ~NFE_SEED_MASK;
823
824	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) {
825		phy  |= NFE_PHY_HDX;	/* half-duplex */
826		misc |= NFE_MISC1_HDX;
827	}
828
829	switch (IFM_SUBTYPE(mii->mii_media_active)) {
830	case IFM_1000_T:	/* full-duplex only */
831		link |= NFE_MEDIA_1000T;
832		seed |= NFE_SEED_1000T;
833		phy  |= NFE_PHY_1000T;
834		break;
835	case IFM_100_TX:
836		link |= NFE_MEDIA_100TX;
837		seed |= NFE_SEED_100TX;
838		phy  |= NFE_PHY_100TX;
839		break;
840	case IFM_10_T:
841		link |= NFE_MEDIA_10T;
842		seed |= NFE_SEED_10T;
843		break;
844	}
845
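	/*
	 * The 0x10000000 bit in NFE_PHY_IFACE appears to flag an RGMII
	 * attached PHY (as in the Linux forcedeth driver); those need
	 * different NFE_SETUP_R1 magic values per link speed.
	 */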
846	if ((phy & 0x10000000) != 0) {
847		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
848			val = NFE_R1_MAGIC_1000;
849		else
850			val = NFE_R1_MAGIC_10_100;
851	} else
852		val = NFE_R1_MAGIC_DEFAULT;
853	NFE_WRITE(sc, NFE_SETUP_R1, val);
854
855	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
856
857	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
858	NFE_WRITE(sc, NFE_MISC1, misc);
859	NFE_WRITE(sc, NFE_LINKSPEED, link);
860
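	/*
	 * Flow control: IFM_FLAG0 and IFM_FLAG1 in the resolved media word
	 * appear to be used here as Rx-pause and Tx-pause indications from
	 * the PHY; treat this as a driver-local convention.
	 */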
861	gmask = mii->mii_media_active & IFM_GMASK;
862	if ((gmask & IFM_FDX) != 0) {
863		/* It seems all hardware supports Rx pause frames. */
864		val = NFE_READ(sc, NFE_RXFILTER);
865		if ((gmask & IFM_FLAG0) != 0)
866			val |= NFE_PFF_RX_PAUSE;
867		else
868			val &= ~NFE_PFF_RX_PAUSE;
869		NFE_WRITE(sc, NFE_RXFILTER, val);
870		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
871			val = NFE_READ(sc, NFE_MISC1);
872			if ((gmask & IFM_FLAG1) != 0) {
873				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
874				    NFE_TX_PAUSE_FRAME_ENABLE);
875				val |= NFE_MISC1_TX_PAUSE;
876			} else {
877				val &= ~NFE_MISC1_TX_PAUSE;
878				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
879				    NFE_TX_PAUSE_FRAME_DISABLE);
880			}
881			NFE_WRITE(sc, NFE_MISC1, val);
882		}
883	} else {
884		/* disable rx/tx pause frames */
885		val = NFE_READ(sc, NFE_RXFILTER);
886		val &= ~NFE_PFF_RX_PAUSE;
887		NFE_WRITE(sc, NFE_RXFILTER, val);
888		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
889			NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
890			    NFE_TX_PAUSE_FRAME_DISABLE);
891			val = NFE_READ(sc, NFE_MISC1);
892			val &= ~NFE_MISC1_TX_PAUSE;
893			NFE_WRITE(sc, NFE_MISC1, val);
894		}
895	}
896
897	txctl = NFE_READ(sc, NFE_TX_CTL);
898	rxctl = NFE_READ(sc, NFE_RX_CTL);
899	if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
900		txctl |= NFE_TX_START;
901		rxctl |= NFE_RX_START;
902	} else {
903		txctl &= ~NFE_TX_START;
904		rxctl &= ~NFE_RX_START;
905	}
906	NFE_WRITE(sc, NFE_TX_CTL, txctl);
907	NFE_WRITE(sc, NFE_RX_CTL, rxctl);
908
909	NFE_UNLOCK(sc);
910}
911
912
913static int
914nfe_miibus_readreg(device_t dev, int phy, int reg)
915{
916	struct nfe_softc *sc = device_get_softc(dev);
917	uint32_t val;
918	int ntries;
919
920	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
921
922	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
923		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
924		DELAY(100);
925	}
926
927	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
928
929	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
930		DELAY(100);
931		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
932			break;
933	}
934	if (ntries == NFE_TIMEOUT) {
935		DPRINTFN(sc, 2, "timeout waiting for PHY\n");
936		return (0);
937	}
938
939	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
940		DPRINTFN(sc, 2, "could not read PHY\n");
941		return (0);
942	}
943
944	val = NFE_READ(sc, NFE_PHY_DATA);
945	if (val != 0xffffffff && val != 0)
946		sc->mii_phyaddr = phy;
947
948	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
949
950	return (val);
951}
952
953
954static int
955nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
956{
957	struct nfe_softc *sc = device_get_softc(dev);
958	uint32_t ctl;
959	int ntries;
960
961	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
962
963	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
964		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
965		DELAY(100);
966	}
967
968	NFE_WRITE(sc, NFE_PHY_DATA, val);
969	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
970	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
971
972	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
973		DELAY(100);
974		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
975			break;
976	}
977#ifdef NFE_DEBUG
978	if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
979		device_printf(sc->nfe_dev, "could not write to PHY\n");
980#endif
981	return (0);
982}
983
984/*
985 * Allocate a jumbo buffer.
986 */
987static void *
988nfe_jalloc(struct nfe_softc *sc)
989{
990	struct nfe_jpool_entry *entry;
991
992	NFE_JLIST_LOCK(sc);
993
994	entry = SLIST_FIRST(&sc->nfe_jfree_listhead);
995
996	if (entry == NULL) {
997		NFE_JLIST_UNLOCK(sc);
998		return (NULL);
999	}
1000
1001	SLIST_REMOVE_HEAD(&sc->nfe_jfree_listhead, jpool_entries);
1002	SLIST_INSERT_HEAD(&sc->nfe_jinuse_listhead, entry, jpool_entries);
1003
1004	NFE_JLIST_UNLOCK(sc);
1005
1006	return (sc->jrxq.jslots[entry->slot]);
1007}
1008
1009/*
1010 * Release a jumbo buffer.
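 * The (buf, args) signature matches an mbuf external-storage free routine:
 * the slot index is recovered from the buffer address and the slot is
 * returned to the free list, so buffers handed out by nfe_jalloc() are
 * presumably reclaimed here when the owning mbuf is freed.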
1011 */
1012static void
1013nfe_jfree(void *buf, void *args)
1014{
1015	struct nfe_softc *sc;
1016	struct nfe_jpool_entry *entry;
1017	int i;
1018
1019	/* Extract the softc struct pointer. */
1020	sc = (struct nfe_softc *)args;
1021	KASSERT(sc != NULL, ("%s: can't find softc pointer!", __func__));
1022
1023	NFE_JLIST_LOCK(sc);
1024	/* Calculate the slot this buffer belongs to. */
1025	i = ((vm_offset_t)buf
1026	     - (vm_offset_t)sc->jrxq.jpool) / NFE_JLEN;
1027	KASSERT(i >= 0 && i < NFE_JSLOTS,
1028	    ("%s: asked to free buffer that we don't manage!", __func__));
1029
1030	entry = SLIST_FIRST(&sc->nfe_jinuse_listhead);
1031	KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
1032	entry->slot = i;
1033	SLIST_REMOVE_HEAD(&sc->nfe_jinuse_listhead, jpool_entries);
1034	SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry, jpool_entries);
1035	if (SLIST_EMPTY(&sc->nfe_jinuse_listhead))
1036		wakeup(sc);
1037
1038	NFE_JLIST_UNLOCK(sc);
1039}
1040
1041struct nfe_dmamap_arg {
1042	bus_addr_t nfe_busaddr;
1043};
1044
1045static int
1046nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1047{
1048	struct nfe_dmamap_arg ctx;
1049	struct nfe_rx_data *data;
1050	void *desc;
1051	int i, error, descsize;
1052
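	/*
	 * Controllers with NFE_40BIT_ADDR use the larger 64-bit descriptor
	 * format; older ones use 32-bit descriptors.  Only one of the
	 * desc64/desc32 pointers is valid for a given chip.
	 */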
1053	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1054		desc = ring->desc64;
1055		descsize = sizeof (struct nfe_desc64);
1056	} else {
1057		desc = ring->desc32;
1058		descsize = sizeof (struct nfe_desc32);
1059	}
1060
1061	ring->cur = ring->next = 0;
1062
1063	error = bus_dma_tag_create(sc->nfe_parent_tag,
1064	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1065	    BUS_SPACE_MAXADDR,			/* lowaddr */
1066	    BUS_SPACE_MAXADDR,			/* highaddr */
1067	    NULL, NULL,				/* filter, filterarg */
1068	    NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
1069	    NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
1070	    0,					/* flags */
1071	    NULL, NULL,				/* lockfunc, lockarg */
1072	    &ring->rx_desc_tag);
1073	if (error != 0) {
1074		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1075		goto fail;
1076	}
1077
1078	/* allocate memory to desc */
1079	error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
1080	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
1081	if (error != 0) {
1082		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1083		goto fail;
1084	}
1085	if (sc->nfe_flags & NFE_40BIT_ADDR)
1086		ring->desc64 = desc;
1087	else
1088		ring->desc32 = desc;
1089
1090	/* map desc to device visible address space */
1091	ctx.nfe_busaddr = 0;
1092	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
1093	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1094	if (error != 0) {
1095		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1096		goto fail;
1097	}
1098	ring->physaddr = ctx.nfe_busaddr;
1099
1100	error = bus_dma_tag_create(sc->nfe_parent_tag,
1101	    1, 0,			/* alignment, boundary */
1102	    BUS_SPACE_MAXADDR,		/* lowaddr */
1103	    BUS_SPACE_MAXADDR,		/* highaddr */
1104	    NULL, NULL,			/* filter, filterarg */
1105	    MCLBYTES, 1,		/* maxsize, nsegments */
1106	    MCLBYTES,			/* maxsegsize */
1107	    0,				/* flags */
1108	    NULL, NULL,			/* lockfunc, lockarg */
1109	    &ring->rx_data_tag);
1110	if (error != 0) {
1111		device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
1112		goto fail;
1113	}
1114
1115	error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
1116	if (error != 0) {
1117		device_printf(sc->nfe_dev,
1118		    "could not create Rx DMA spare map\n");
1119		goto fail;
1120	}
1121
1122	/*
1123	 * Pre-allocate Rx buffers and populate Rx ring.
1124	 */
1125	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1126		data = &sc->rxq.data[i];
1127		data->rx_data_map = NULL;
1128		data->m = NULL;
1129		error = bus_dmamap_create(ring->rx_data_tag, 0,
1130		    &data->rx_data_map);
1131		if (error != 0) {
1132			device_printf(sc->nfe_dev,
1133			    "could not create Rx DMA map\n");
1134			goto fail;
1135		}
1136	}
1137
1138fail:
1139	return (error);
1140}
1141
1142
1143static int
1144nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1145{
1146	struct nfe_dmamap_arg ctx;
1147	struct nfe_rx_data *data;
1148	void *desc;
1149	struct nfe_jpool_entry *entry;
1150	uint8_t *ptr;
1151	int i, error, descsize;
1152
1153	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1154		return (0);
1155
1156	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1157		desc = ring->jdesc64;
1158		descsize = sizeof (struct nfe_desc64);
1159	} else {
1160		desc = ring->jdesc32;
1161		descsize = sizeof (struct nfe_desc32);
1162	}
1163
1164	ring->jcur = ring->jnext = 0;
1165
1166	/* Create DMA tag for jumbo Rx ring. */
1167	error = bus_dma_tag_create(sc->nfe_parent_tag,
1168	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1169	    BUS_SPACE_MAXADDR,			/* lowaddr */
1170	    BUS_SPACE_MAXADDR,			/* highaddr */
1171	    NULL, NULL,				/* filter, filterarg */
1172	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsize */
1173	    1, 					/* nsegments */
1174	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsegsize */
1175	    0,					/* flags */
1176	    NULL, NULL,				/* lockfunc, lockarg */
1177	    &ring->jrx_desc_tag);
1178	if (error != 0) {
1179		device_printf(sc->nfe_dev,
1180		    "could not create jumbo ring DMA tag\n");
1181		goto fail;
1182	}
1183
1184	/* Create DMA tag for jumbo buffer blocks. */
1185	error = bus_dma_tag_create(sc->nfe_parent_tag,
1186	    PAGE_SIZE, 0,			/* alignment, boundary */
1187	    BUS_SPACE_MAXADDR,			/* lowaddr */
1188	    BUS_SPACE_MAXADDR,			/* highaddr */
1189	    NULL, NULL,				/* filter, filterarg */
1190	    NFE_JMEM,				/* maxsize */
1191	    1, 					/* nsegments */
1192	    NFE_JMEM,				/* maxsegsize */
1193	    0,					/* flags */
1194	    NULL, NULL,				/* lockfunc, lockarg */
1195	    &ring->jrx_jumbo_tag);
1196	if (error != 0) {
1197		device_printf(sc->nfe_dev,
1198		    "could not create jumbo Rx buffer block DMA tag\n");
1199		goto fail;
1200	}
1201
1202	/* Create DMA tag for jumbo Rx buffers. */
1203	error = bus_dma_tag_create(sc->nfe_parent_tag,
1204	    PAGE_SIZE, 0,			/* alignment, boundary */
1205	    BUS_SPACE_MAXADDR,			/* lowaddr */
1206	    BUS_SPACE_MAXADDR,			/* highaddr */
1207	    NULL, NULL,				/* filter, filterarg */
1208	    NFE_JLEN,				/* maxsize */
1209	    1,					/* nsegments */
1210	    NFE_JLEN,				/* maxsegsize */
1211	    0,					/* flags */
1212	    NULL, NULL,				/* lockfunc, lockarg */
1213	    &ring->jrx_data_tag);
1214	if (error != 0) {
1215		device_printf(sc->nfe_dev,
1216		    "could not create jumbo Rx buffer DMA tag\n");
1217		goto fail;
1218	}
1219
1220	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
1221	error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
1222	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
1223	if (error != 0) {
1224		device_printf(sc->nfe_dev,
1225		    "could not allocate DMA'able memory for jumbo Rx ring\n");
1226		goto fail;
1227	}
1228	if (sc->nfe_flags & NFE_40BIT_ADDR)
1229		ring->jdesc64 = desc;
1230	else
1231		ring->jdesc32 = desc;
1232
1233	ctx.nfe_busaddr = 0;
1234	error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
1235	    NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1236	if (error != 0) {
1237		device_printf(sc->nfe_dev,
1238		    "could not load DMA'able memory for jumbo Rx ring\n");
1239		goto fail;
1240	}
1241	ring->jphysaddr = ctx.nfe_busaddr;
1242
1243	/* Create DMA maps for jumbo Rx buffers. */
1244	error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
1245	if (error != 0) {
1246		device_printf(sc->nfe_dev,
1247		    "could not create jumbo Rx DMA spare map\n");
1248		goto fail;
1249	}
1250
1251	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1252		data = &sc->jrxq.jdata[i];
1253		data->rx_data_map = NULL;
1254		data->m = NULL;
1255		error = bus_dmamap_create(ring->jrx_data_tag, 0,
1256		    &data->rx_data_map);
1257		if (error != 0) {
1258			device_printf(sc->nfe_dev,
1259			    "could not create jumbo Rx DMA map\n");
1260			goto fail;
1261		}
1262	}
1263
1264	/* Allocate DMA'able memory and load the DMA map for jumbo buf. */
1265	error = bus_dmamem_alloc(ring->jrx_jumbo_tag, (void **)&ring->jpool,
1266	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1267	    &ring->jrx_jumbo_map);
1268	if (error != 0) {
1269		device_printf(sc->nfe_dev,
1270		    "could not allocate DMA'able memory for jumbo pool\n");
1271		goto fail;
1272	}
1273
1274	ctx.nfe_busaddr = 0;
1275	error = bus_dmamap_load(ring->jrx_jumbo_tag, ring->jrx_jumbo_map,
1276	    ring->jpool, NFE_JMEM, nfe_dma_map_segs, &ctx, 0);
1277	if (error != 0) {
1278		device_printf(sc->nfe_dev,
1279		    "could not load DMA'able memory for jumbo pool\n");
1280		goto fail;
1281	}
1282
1283	/*
1284	 * Now divide it up into 9K pieces and save the addresses
1285	 * in an array.
1286	 */
1287	ptr = ring->jpool;
1288	for (i = 0; i < NFE_JSLOTS; i++) {
1289		ring->jslots[i] = ptr;
1290		ptr += NFE_JLEN;
1291		entry = malloc(sizeof(struct nfe_jpool_entry), M_DEVBUF,
1292		    M_WAITOK);
1293		if (entry == NULL) {
1294			device_printf(sc->nfe_dev,
1295			    "no memory for jumbo buffers!\n");
1296			error = ENOMEM;
1297			goto fail;
1298		}
1299		entry->slot = i;
1300		SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry,
1301		    jpool_entries);
1302	}
1303
1304	return (0);
1305
1306fail:
1307	nfe_free_jrx_ring(sc, ring);
1308	return (error);
1309}
1310
1311
1312static int
1313nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1314{
1315	void *desc;
1316	size_t descsize;
1317	int i;
1318
1319	ring->cur = ring->next = 0;
1320	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1321		desc = ring->desc64;
1322		descsize = sizeof (struct nfe_desc64);
1323	} else {
1324		desc = ring->desc32;
1325		descsize = sizeof (struct nfe_desc32);
1326	}
1327	bzero(desc, descsize * NFE_RX_RING_COUNT);
1328	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1329		if (nfe_newbuf(sc, i) != 0)
1330			return (ENOBUFS);
1331	}
1332
1333	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
1334	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1335
1336	return (0);
1337}
1338
1339
1340static int
1341nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1342{
1343	void *desc;
1344	size_t descsize;
1345	int i;
1346
1347	ring->jcur = ring->jnext = 0;
1348	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1349		desc = ring->jdesc64;
1350		descsize = sizeof (struct nfe_desc64);
1351	} else {
1352		desc = ring->jdesc32;
1353		descsize = sizeof (struct nfe_desc32);
1354	}
1355	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
1356	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1357		if (nfe_jnewbuf(sc, i) != 0)
1358			return (ENOBUFS);
1359	}
1360
1361	bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
1362	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1363
1364	return (0);
1365}
1366
1367
1368static void
1369nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1370{
1371	struct nfe_rx_data *data;
1372	void *desc;
1373	int i, descsize;
1374
1375	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1376		desc = ring->desc64;
1377		descsize = sizeof (struct nfe_desc64);
1378	} else {
1379		desc = ring->desc32;
1380		descsize = sizeof (struct nfe_desc32);
1381	}
1382
1383	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1384		data = &ring->data[i];
1385		if (data->rx_data_map != NULL) {
1386			bus_dmamap_destroy(ring->rx_data_tag,
1387			    data->rx_data_map);
1388			data->rx_data_map = NULL;
1389		}
1390		if (data->m != NULL) {
1391			m_freem(data->m);
1392			data->m = NULL;
1393		}
1394	}
1395	if (ring->rx_data_tag != NULL) {
1396		if (ring->rx_spare_map != NULL) {
1397			bus_dmamap_destroy(ring->rx_data_tag,
1398			    ring->rx_spare_map);
1399			ring->rx_spare_map = NULL;
1400		}
1401		bus_dma_tag_destroy(ring->rx_data_tag);
1402		ring->rx_data_tag = NULL;
1403	}
1404
1405	if (desc != NULL) {
1406		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
1407		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
1408		ring->desc64 = NULL;
1409		ring->desc32 = NULL;
1410		ring->rx_desc_map = NULL;
1411	}
1412	if (ring->rx_desc_tag != NULL) {
1413		bus_dma_tag_destroy(ring->rx_desc_tag);
1414		ring->rx_desc_tag = NULL;
1415	}
1416}
1417
1418
1419static void
1420nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1421{
1422	struct nfe_jpool_entry *entry;
1423	struct nfe_rx_data *data;
1424	void *desc;
1425	int i, descsize;
1426
1427	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1428		return;
1429
1430	NFE_JLIST_LOCK(sc);
1431	while ((entry = SLIST_FIRST(&sc->nfe_jinuse_listhead))) {
1432		device_printf(sc->nfe_dev,
1433		    "asked to free buffer that is in use!\n");
1434		SLIST_REMOVE_HEAD(&sc->nfe_jinuse_listhead, jpool_entries);
1435		SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry,
1436		    jpool_entries);
1437	}
1438
1439	while (!SLIST_EMPTY(&sc->nfe_jfree_listhead)) {
1440		entry = SLIST_FIRST(&sc->nfe_jfree_listhead);
1441		SLIST_REMOVE_HEAD(&sc->nfe_jfree_listhead, jpool_entries);
1442		free(entry, M_DEVBUF);
1443	}
1444	NFE_JLIST_UNLOCK(sc);
1445
1446	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1447		desc = ring->jdesc64;
1448		descsize = sizeof (struct nfe_desc64);
1449	} else {
1450		desc = ring->jdesc32;
1451		descsize = sizeof (struct nfe_desc32);
1452	}
1453
1454	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1455		data = &ring->jdata[i];
1456		if (data->rx_data_map != NULL) {
1457			bus_dmamap_destroy(ring->jrx_data_tag,
1458			    data->rx_data_map);
1459			data->rx_data_map = NULL;
1460		}
1461		if (data->m != NULL) {
1462			m_freem(data->m);
1463			data->m = NULL;
1464		}
1465	}
1466	if (ring->jrx_data_tag != NULL) {
1467		if (ring->jrx_spare_map != NULL) {
1468			bus_dmamap_destroy(ring->jrx_data_tag,
1469			    ring->jrx_spare_map);
1470			ring->jrx_spare_map = NULL;
1471		}
1472		bus_dma_tag_destroy(ring->jrx_data_tag);
1473		ring->jrx_data_tag = NULL;
1474	}
1475
1476	if (desc != NULL) {
1477		bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
1478		bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
1479		ring->jdesc64 = NULL;
1480		ring->jdesc32 = NULL;
1481		ring->jrx_desc_map = NULL;
1482	}
1483	/* Destroy jumbo buffer block. */
1484	if (ring->jrx_jumbo_map != NULL)
1485		bus_dmamap_unload(ring->jrx_jumbo_tag, ring->jrx_jumbo_map);
1486	if (ring->jrx_jumbo_map != NULL) {
1487		bus_dmamem_free(ring->jrx_jumbo_tag, ring->jpool,
1488		    ring->jrx_jumbo_map);
1489		ring->jpool = NULL;
1490		ring->jrx_jumbo_map = NULL;
1491	}
1492	if (ring->jrx_desc_tag != NULL) {
1493		bus_dma_tag_destroy(ring->jrx_desc_tag);
1494		ring->jrx_desc_tag = NULL;
1495	}
1496}
1497
1498
1499static int
1500nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1501{
1502	struct nfe_dmamap_arg ctx;
1503	int i, error;
1504	void *desc;
1505	int descsize;
1506
1507	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1508		desc = ring->desc64;
1509		descsize = sizeof (struct nfe_desc64);
1510	} else {
1511		desc = ring->desc32;
1512		descsize = sizeof (struct nfe_desc32);
1513	}
1514
1515	ring->queued = 0;
1516	ring->cur = ring->next = 0;
1517
1518	error = bus_dma_tag_create(sc->nfe_parent_tag,
1519	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1520	    BUS_SPACE_MAXADDR,			/* lowaddr */
1521	    BUS_SPACE_MAXADDR,			/* highaddr */
1522	    NULL, NULL,				/* filter, filterarg */
1523	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
1524	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
1525	    0,					/* flags */
1526	    NULL, NULL,				/* lockfunc, lockarg */
1527	    &ring->tx_desc_tag);
1528	if (error != 0) {
1529		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1530		goto fail;
1531	}
1532
1533	error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
1534	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
1535	if (error != 0) {
1536		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1537		goto fail;
1538	}
1539	if (sc->nfe_flags & NFE_40BIT_ADDR)
1540		ring->desc64 = desc;
1541	else
1542		ring->desc32 = desc;
1543
1544	ctx.nfe_busaddr = 0;
1545	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
1546	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1547	if (error != 0) {
1548		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1549		goto fail;
1550	}
1551	ring->physaddr = ctx.nfe_busaddr;
1552
1553	error = bus_dma_tag_create(sc->nfe_parent_tag,
1554	    1, 0,
1555	    BUS_SPACE_MAXADDR,
1556	    BUS_SPACE_MAXADDR,
1557	    NULL, NULL,
1558	    MCLBYTES * NFE_MAX_SCATTER,
1559	    NFE_MAX_SCATTER,
1560	    MCLBYTES,
1561	    0,
1562	    NULL, NULL,
1563	    &ring->tx_data_tag);
1564	if (error != 0) {
1565		device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
1566		goto fail;
1567	}
1568
1569	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1570		error = bus_dmamap_create(ring->tx_data_tag, 0,
1571		    &ring->data[i].tx_data_map);
1572		if (error != 0) {
1573			device_printf(sc->nfe_dev,
1574			    "could not create Tx DMA map\n");
1575			goto fail;
1576		}
1577	}
1578
1579fail:
1580	return (error);
1581}
1582
1583
1584static void
1585nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1586{
1587	void *desc;
1588	size_t descsize;
1589
1590	sc->nfe_force_tx = 0;
1591	ring->queued = 0;
1592	ring->cur = ring->next = 0;
1593	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1594		desc = ring->desc64;
1595		descsize = sizeof (struct nfe_desc64);
1596	} else {
1597		desc = ring->desc32;
1598		descsize = sizeof (struct nfe_desc32);
1599	}
1600	bzero(desc, descsize * NFE_TX_RING_COUNT);
1601
1602	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1603	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1604}
1605
1606
1607static void
1608nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1609{
1610	struct nfe_tx_data *data;
1611	void *desc;
1612	int i, descsize;
1613
1614	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1615		desc = ring->desc64;
1616		descsize = sizeof (struct nfe_desc64);
1617	} else {
1618		desc = ring->desc32;
1619		descsize = sizeof (struct nfe_desc32);
1620	}
1621
1622	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1623		data = &ring->data[i];
1624
1625		if (data->m != NULL) {
1626			bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
1627			    BUS_DMASYNC_POSTWRITE);
1628			bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
1629			m_freem(data->m);
1630			data->m = NULL;
1631		}
1632		if (data->tx_data_map != NULL) {
1633			bus_dmamap_destroy(ring->tx_data_tag,
1634			    data->tx_data_map);
1635			data->tx_data_map = NULL;
1636		}
1637	}
1638
1639	if (ring->tx_data_tag != NULL) {
1640		bus_dma_tag_destroy(ring->tx_data_tag);
1641		ring->tx_data_tag = NULL;
1642	}
1643
1644	if (desc != NULL) {
1645		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1646		    BUS_DMASYNC_POSTWRITE);
1647		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1648		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1649		ring->desc64 = NULL;
1650		ring->desc32 = NULL;
1651		ring->tx_desc_map = NULL;
1652		bus_dma_tag_destroy(ring->tx_desc_tag);
1653		ring->tx_desc_tag = NULL;
1654	}
1655}
1656
1657#ifdef DEVICE_POLLING
1658static poll_handler_t nfe_poll;
1659
1660
1661static void
1662nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1663{
1664	struct nfe_softc *sc = ifp->if_softc;
1665	uint32_t r;
1666
1667	NFE_LOCK(sc);
1668
1669	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1670		NFE_UNLOCK(sc);
1671		return;
1672	}
1673
1674	nfe_rxeof(sc, count);
1675	nfe_txeof(sc);
1676	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1677		taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_tx_task);
1678
1679	if (cmd == POLL_AND_CHECK_STATUS) {
1680		if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1681			NFE_UNLOCK(sc);
1682			return;
1683		}
1684		NFE_WRITE(sc, sc->nfe_irq_status, r);
1685
1686		if (r & NFE_IRQ_LINK) {
1687			NFE_READ(sc, NFE_PHY_STATUS);
1688			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1689			DPRINTF(sc, "link state changed\n");
1690		}
1691	}
1692	NFE_UNLOCK(sc);
1693}
1694#endif /* DEVICE_POLLING */
1695
1696static void
1697nfe_set_intr(struct nfe_softc *sc)
1698{
1699
1700	if (sc->nfe_msi != 0)
1701		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1702}
1703
1704
1705/* In MSI-X mode, a write to the interrupt mask registers behaves as XOR. */
1706static __inline void
1707nfe_enable_intr(struct nfe_softc *sc)
1708{
1709
1710	if (sc->nfe_msix != 0) {
1711		/* XXX Should have a better way to enable interrupts! */
1712		if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
1713			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1714	} else
1715		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1716}
1717
1718
1719static __inline void
1720nfe_disable_intr(struct nfe_softc *sc)
1721{
1722
1723	if (sc->nfe_msix != 0) {
1724		/* XXX Should have a better way to disable interrupts! */
1725		if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
1726			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1727	} else
1728		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1729}
1730
1731
1732static int
1733nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1734{
1735	struct nfe_softc *sc;
1736	struct ifreq *ifr;
1737	struct mii_data *mii;
1738	int error, init, mask;
1739
1740	sc = ifp->if_softc;
1741	ifr = (struct ifreq *) data;
1742	error = 0;
1743	init = 0;
1744	switch (cmd) {
1745	case SIOCSIFMTU:
1746		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
1747			error = EINVAL;
1748		else if (ifp->if_mtu != ifr->ifr_mtu) {
1749			if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0 &&
1750			    ifr->ifr_mtu > ETHERMTU)
1751				error = EINVAL;
1752			else {
1753				NFE_LOCK(sc);
1754				ifp->if_mtu = ifr->ifr_mtu;
1755				if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1756					nfe_init_locked(sc);
1757				NFE_UNLOCK(sc);
1758			}
1759		}
1760		break;
1761	case SIOCSIFFLAGS:
1762		NFE_LOCK(sc);
1763		if (ifp->if_flags & IFF_UP) {
1764			/*
1765			 * If only the PROMISC or ALLMULTI flag changes, then
1766			 * don't do a full re-init of the chip, just update
1767			 * the Rx filter.
1768			 */
1769			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1770			    ((ifp->if_flags ^ sc->nfe_if_flags) &
1771			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1772				nfe_setmulti(sc);
1773			else
1774				nfe_init_locked(sc);
1775		} else {
1776			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1777				nfe_stop(ifp);
1778		}
1779		sc->nfe_if_flags = ifp->if_flags;
1780		NFE_UNLOCK(sc);
1781		error = 0;
1782		break;
1783	case SIOCADDMULTI:
1784	case SIOCDELMULTI:
1785		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1786			NFE_LOCK(sc);
1787			nfe_setmulti(sc);
1788			NFE_UNLOCK(sc);
1789			error = 0;
1790		}
1791		break;
1792	case SIOCSIFMEDIA:
1793	case SIOCGIFMEDIA:
1794		mii = device_get_softc(sc->nfe_miibus);
1795		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1796		break;
1797	case SIOCSIFCAP:
1798		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1799#ifdef DEVICE_POLLING
1800		if ((mask & IFCAP_POLLING) != 0) {
1801			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1802				error = ether_poll_register(nfe_poll, ifp);
1803				if (error)
1804					break;
1805				NFE_LOCK(sc);
1806				nfe_disable_intr(sc);
1807				ifp->if_capenable |= IFCAP_POLLING;
1808				NFE_UNLOCK(sc);
1809			} else {
1810				error = ether_poll_deregister(ifp);
1811				/* Enable interrupt even in error case */
1812				NFE_LOCK(sc);
1813				nfe_enable_intr(sc);
1814				ifp->if_capenable &= ~IFCAP_POLLING;
1815				NFE_UNLOCK(sc);
1816			}
1817		}
1818#endif /* DEVICE_POLLING */
1819		if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1820		    (mask & IFCAP_HWCSUM) != 0) {
1821			ifp->if_capenable ^= IFCAP_HWCSUM;
1822			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
1823			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
1824				ifp->if_hwassist |= NFE_CSUM_FEATURES;
1825			else
1826				ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
1827			init++;
1828		}
1829		if ((sc->nfe_flags & NFE_HW_VLAN) != 0 &&
1830		    (mask & IFCAP_VLAN_HWTAGGING) != 0) {
1831			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1832			init++;
1833		}
1834		/*
1835		 * XXX
1836		 * It seems that VLAN stripping requires Rx checksum offload.
1837		 * Unfortunately FreeBSD has no way to disable only Rx side
1838		 * VLAN stripping.  So when Rx checksum offload is disabled,
1839		 * turn off hardware VLAN assist entirely.
1840		 */
1841		if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) ==
1842		    (NFE_HW_CSUM | NFE_HW_VLAN)) {
1843			if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
1844				ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING;
1845		}
1846
1847		if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1848		    (mask & IFCAP_TSO4) != 0) {
1849			ifp->if_capenable ^= IFCAP_TSO4;
1850			if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
1851			    (IFCAP_TSO4 & ifp->if_capabilities) != 0)
1852				ifp->if_hwassist |= CSUM_TSO;
1853			else
1854				ifp->if_hwassist &= ~CSUM_TSO;
1855		}
1856
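		/*
		 * Checksum and VLAN offload changes require reprogramming the
		 * chip, so restart it if the interface is running.
		 */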
1857		if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1858			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1859			nfe_init(sc);
1860		}
1861		if ((sc->nfe_flags & NFE_HW_VLAN) != 0)
1862			VLAN_CAPABILITIES(ifp);
1863		break;
1864	default:
1865		error = ether_ioctl(ifp, cmd, data);
1866		break;
1867	}
1868
1869	return (error);
1870}
1871
1872
1873static int
1874nfe_intr(void *arg)
1875{
1876	struct nfe_softc *sc;
1877	uint32_t status;
1878
1879	sc = (struct nfe_softc *)arg;
1880
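	/*
	 * Interrupt filter: bail out if the interrupt is not ours,
	 * otherwise mask further interrupts and defer the real work
	 * to nfe_int_task() on the fast taskqueue.
	 */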
1881	status = NFE_READ(sc, sc->nfe_irq_status);
1882	if (status == 0 || status == 0xffffffff)
1883		return (FILTER_STRAY);
1884	nfe_disable_intr(sc);
1885	taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_int_task);
1886
1887	return (FILTER_HANDLED);
1888}
1889
1890
1891static void
1892nfe_int_task(void *arg, int pending)
1893{
1894	struct nfe_softc *sc = arg;
1895	struct ifnet *ifp = sc->nfe_ifp;
1896	uint32_t r;
1897	int domore;
1898
1899	NFE_LOCK(sc);
1900
1901	if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1902		nfe_enable_intr(sc);
1903		NFE_UNLOCK(sc);
1904		return;	/* not for us */
1905	}
1906	NFE_WRITE(sc, sc->nfe_irq_status, r);
1907
1908	DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
1909
1910#ifdef DEVICE_POLLING
1911	if (ifp->if_capenable & IFCAP_POLLING) {
1912		NFE_UNLOCK(sc);
1913		return;
1914	}
1915#endif
1916
1917	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1918		NFE_UNLOCK(sc);
1919		nfe_enable_intr(sc);
1920		return;
1921	}
1922
1923	if (r & NFE_IRQ_LINK) {
1924		NFE_READ(sc, NFE_PHY_STATUS);
1925		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1926		DPRINTF(sc, "link state changed\n");
1927	}
1928
1929	domore = 0;
1930	/* check Rx ring */
1931	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1932		domore = nfe_jrxeof(sc, sc->nfe_process_limit);
1933	else
1934		domore = nfe_rxeof(sc, sc->nfe_process_limit);
1935	/* check Tx ring */
1936	nfe_txeof(sc);
1937
1938	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1939		taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_tx_task);
1940
1941	NFE_UNLOCK(sc);
1942
1943	if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1944		taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_int_task);
1945		return;
1946	}
1947
1948	/* Reenable interrupts. */
1949	nfe_enable_intr(sc);
1950}
1951
1952
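/*
 * Give the existing Rx buffer at slot idx back to the chip: restore the
 * buffer address (64-bit descriptors only, see nfe_rxeof()) and length,
 * then mark the descriptor NFE_RX_READY again.
 */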
1953static __inline void
1954nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1955{
1956	struct nfe_desc32 *desc32;
1957	struct nfe_desc64 *desc64;
1958	struct nfe_rx_data *data;
1959	struct mbuf *m;
1960
1961	data = &sc->rxq.data[idx];
1962	m = data->m;
1963
1964	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1965		desc64 = &sc->rxq.desc64[idx];
1966		/* The chip may overwrite physaddr[1] with the VLAN tag; restore the address. */
1967		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1968		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1969		desc64->length = htole16(m->m_len);
1970		desc64->flags = htole16(NFE_RX_READY);
1971	} else {
1972		desc32 = &sc->rxq.desc32[idx];
1973		desc32->length = htole16(m->m_len);
1974		desc32->flags = htole16(NFE_RX_READY);
1975	}
1976}
1977
1978
1979static __inline void
1980nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1981{
1982	struct nfe_desc32 *desc32;
1983	struct nfe_desc64 *desc64;
1984	struct nfe_rx_data *data;
1985	struct mbuf *m;
1986
1987	data = &sc->jrxq.jdata[idx];
1988	m = data->m;
1989
1990	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1991		desc64 = &sc->jrxq.jdesc64[idx];
1992		/* The chip may overwrite physaddr[1] with the VLAN tag; restore the address. */
1993		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1994		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1995		desc64->length = htole16(m->m_len);
1996		desc64->flags = htole16(NFE_RX_READY);
1997	} else {
1998		desc32 = &sc->jrxq.jdesc32[idx];
1999		desc32->length = htole16(m->m_len);
2000		desc32->flags = htole16(NFE_RX_READY);
2001	}
2002}
2003
2004
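/*
 * Allocate and DMA-map a fresh mbuf cluster for Rx slot idx.  The spare
 * map is loaded first so the currently posted buffer is left untouched if
 * allocation or mapping fails; on success the maps are swapped and the
 * descriptor is rewritten with the new buffer address.
 */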
2005static int
2006nfe_newbuf(struct nfe_softc *sc, int idx)
2007{
2008	struct nfe_rx_data *data;
2009	struct nfe_desc32 *desc32;
2010	struct nfe_desc64 *desc64;
2011	struct mbuf *m;
2012	bus_dma_segment_t segs[1];
2013	bus_dmamap_t map;
2014	int nsegs;
2015
2016	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2017	if (m == NULL)
2018		return (ENOBUFS);
2019
2020	m->m_len = m->m_pkthdr.len = MCLBYTES;
2021	m_adj(m, ETHER_ALIGN);
2022
2023	if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
2024	    m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
2025		m_freem(m);
2026		return (ENOBUFS);
2027	}
2028	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2029
2030	data = &sc->rxq.data[idx];
2031	if (data->m != NULL) {
2032		bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
2033		    BUS_DMASYNC_POSTREAD);
2034		bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
2035	}
2036	map = data->rx_data_map;
2037	data->rx_data_map = sc->rxq.rx_spare_map;
2038	sc->rxq.rx_spare_map = map;
2039	bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
2040	    BUS_DMASYNC_PREREAD);
2041	data->paddr = segs[0].ds_addr;
2042	data->m = m;
2043	/* update mapping address in h/w descriptor */
2044	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2045		desc64 = &sc->rxq.desc64[idx];
2046		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2047		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2048		desc64->length = htole16(segs[0].ds_len);
2049		desc64->flags = htole16(NFE_RX_READY);
2050	} else {
2051		desc32 = &sc->rxq.desc32[idx];
2052		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2053		desc32->length = htole16(segs[0].ds_len);
2054		desc32->flags = htole16(NFE_RX_READY);
2055	}
2056
2057	return (0);
2058}
2059
2060
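/*
 * Jumbo counterpart of nfe_newbuf(): attach a buffer from the driver's
 * private jumbo allocator (nfe_jalloc()) to an mbuf header and map it
 * with the jumbo Rx DMA tag.
 */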
2061static int
2062nfe_jnewbuf(struct nfe_softc *sc, int idx)
2063{
2064	struct nfe_rx_data *data;
2065	struct nfe_desc32 *desc32;
2066	struct nfe_desc64 *desc64;
2067	struct mbuf *m;
2068	bus_dma_segment_t segs[1];
2069	bus_dmamap_t map;
2070	int nsegs;
2071	void *buf;
2072
2073	MGETHDR(m, M_DONTWAIT, MT_DATA);
2074	if (m == NULL)
2075		return (ENOBUFS);
2076	buf = nfe_jalloc(sc);
2077	if (buf == NULL) {
2078		m_freem(m);
2079		return (ENOBUFS);
2080	}
2081	/* Attach the buffer to the mbuf. */
2082	MEXTADD(m, buf, NFE_JLEN, nfe_jfree, (struct nfe_softc *)sc, 0,
2083	    EXT_NET_DRV);
2084	if ((m->m_flags & M_EXT) == 0) {
2085		m_freem(m);
2086		return (ENOBUFS);
2087	}
2088	m->m_pkthdr.len = m->m_len = NFE_JLEN;
2089	m_adj(m, ETHER_ALIGN);
2090
2091	if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
2092	    sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
2093		m_freem(m);
2094		return (ENOBUFS);
2095	}
2096	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2097
2098	data = &sc->jrxq.jdata[idx];
2099	if (data->m != NULL) {
2100		bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2101		    BUS_DMASYNC_POSTREAD);
2102		bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
2103	}
2104	map = data->rx_data_map;
2105	data->rx_data_map = sc->jrxq.jrx_spare_map;
2106	sc->jrxq.jrx_spare_map = map;
2107	bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2108	    BUS_DMASYNC_PREREAD);
2109	data->paddr = segs[0].ds_addr;
2110	data->m = m;
2111	/* update mapping address in h/w descriptor */
2112	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2113		desc64 = &sc->jrxq.jdesc64[idx];
2114		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2115		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2116		desc64->length = htole16(segs[0].ds_len);
2117		desc64->flags = htole16(NFE_RX_READY);
2118	} else {
2119		desc32 = &sc->jrxq.jdesc32[idx];
2120		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2121		desc32->length = htole16(segs[0].ds_len);
2122		desc32->flags = htole16(NFE_RX_READY);
2123	}
2124
2125	return (0);
2126}
2127
2128
2129static int
2130nfe_rxeof(struct nfe_softc *sc, int count)
2131{
2132	struct ifnet *ifp = sc->nfe_ifp;
2133	struct nfe_desc32 *desc32;
2134	struct nfe_desc64 *desc64;
2135	struct nfe_rx_data *data;
2136	struct mbuf *m;
2137	uint16_t flags;
2138	int len, prog;
2139	uint32_t vtag = 0;
2140
2141	NFE_LOCK_ASSERT(sc);
2142
2143	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2144	    BUS_DMASYNC_POSTREAD);
2145
2146	for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2147		if (count <= 0)
2148			break;
2149		count--;
2150
2151		data = &sc->rxq.data[sc->rxq.cur];
2152
2153		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2154			desc64 = &sc->rxq.desc64[sc->rxq.cur];
2155			vtag = le32toh(desc64->physaddr[1]);
2156			flags = le16toh(desc64->flags);
2157			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2158		} else {
2159			desc32 = &sc->rxq.desc32[sc->rxq.cur];
2160			flags = le16toh(desc32->flags);
2161			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2162		}
2163
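		/*
		 * NFE_RX_READY still set means the chip owns this
		 * descriptor; stop processing here.
		 */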
2164		if (flags & NFE_RX_READY)
2165			break;
2166		prog++;
2167		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2168			if (!(flags & NFE_RX_VALID_V1)) {
2169				ifp->if_ierrors++;
2170				nfe_discard_rxbuf(sc, sc->rxq.cur);
2171				continue;
2172			}
2173			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2174				flags &= ~NFE_RX_ERROR;
2175				len--;	/* fix buffer length */
2176			}
2177		} else {
2178			if (!(flags & NFE_RX_VALID_V2)) {
2179				ifp->if_ierrors++;
2180				nfe_discard_rxbuf(sc, sc->rxq.cur);
2181				continue;
2182			}
2183
2184			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2185				flags &= ~NFE_RX_ERROR;
2186				len--;	/* fix buffer length */
2187			}
2188		}
2189
2190		if (flags & NFE_RX_ERROR) {
2191			ifp->if_ierrors++;
2192			nfe_discard_rxbuf(sc, sc->rxq.cur);
2193			continue;
2194		}
2195
2196		m = data->m;
2197		if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2198			ifp->if_iqdrops++;
2199			nfe_discard_rxbuf(sc, sc->rxq.cur);
2200			continue;
2201		}
2202
2203		if ((vtag & NFE_RX_VTAG) != 0 &&
2204		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2205			m->m_pkthdr.ether_vtag = vtag & 0xffff;
2206			m->m_flags |= M_VLANTAG;
2207		}
2208
2209		m->m_pkthdr.len = m->m_len = len;
2210		m->m_pkthdr.rcvif = ifp;
2211
2212		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2213			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2214				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2215				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2216				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2217				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
2218					m->m_pkthdr.csum_flags |=
2219					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2220					m->m_pkthdr.csum_data = 0xffff;
2221				}
2222			}
2223		}
2224
2225		ifp->if_ipackets++;
2226
2227		NFE_UNLOCK(sc);
2228		(*ifp->if_input)(ifp, m);
2229		NFE_LOCK(sc);
2230	}
2231
2232	if (prog > 0)
2233		bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2234		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2235
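	/*
	 * Returning EAGAIN tells nfe_int_task() the process limit was
	 * reached and more frames may still be pending.
	 */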
2236	return (count > 0 ? 0 : EAGAIN);
2237}
2238
2239
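/*
 * Jumbo-frame variant of nfe_rxeof(); same logic, driven off the
 * jumbo Rx ring.
 */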
2240static int
2241nfe_jrxeof(struct nfe_softc *sc, int count)
2242{
2243	struct ifnet *ifp = sc->nfe_ifp;
2244	struct nfe_desc32 *desc32;
2245	struct nfe_desc64 *desc64;
2246	struct nfe_rx_data *data;
2247	struct mbuf *m;
2248	uint16_t flags;
2249	int len, prog;
2250	uint32_t vtag = 0;
2251
2252	NFE_LOCK_ASSERT(sc);
2253
2254	bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2255	    BUS_DMASYNC_POSTREAD);
2256
2257	for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2258	    vtag = 0) {
2259		if (count <= 0)
2260			break;
2261		count--;
2262
2263		data = &sc->jrxq.jdata[sc->jrxq.jcur];
2264
2265		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2266			desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2267			vtag = le32toh(desc64->physaddr[1]);
2268			flags = le16toh(desc64->flags);
2269			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2270		} else {
2271			desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2272			flags = le16toh(desc32->flags);
2273			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2274		}
2275
2276		if (flags & NFE_RX_READY)
2277			break;
2278		prog++;
2279		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2280			if (!(flags & NFE_RX_VALID_V1)) {
2281				ifp->if_ierrors++;
2282				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2283				continue;
2284			}
2285			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2286				flags &= ~NFE_RX_ERROR;
2287				len--;	/* fix buffer length */
2288			}
2289		} else {
2290			if (!(flags & NFE_RX_VALID_V2)) {
2291				ifp->if_ierrors++;
2292				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2293				continue;
2294			}
2295
2296			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2297				flags &= ~NFE_RX_ERROR;
2298				len--;	/* fix buffer length */
2299			}
2300		}
2301
2302		if (flags & NFE_RX_ERROR) {
2303			ifp->if_ierrors++;
2304			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2305			continue;
2306		}
2307
2308		m = data->m;
2309		if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2310			ifp->if_iqdrops++;
2311			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2312			continue;
2313		}
2314
2315		if ((vtag & NFE_RX_VTAG) != 0 &&
2316		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2317			m->m_pkthdr.ether_vtag = vtag & 0xffff;
2318			m->m_flags |= M_VLANTAG;
2319		}
2320
2321		m->m_pkthdr.len = m->m_len = len;
2322		m->m_pkthdr.rcvif = ifp;
2323
2324		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2325			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2326				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2327				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2328				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2329				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
2330					m->m_pkthdr.csum_flags |=
2331					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2332					m->m_pkthdr.csum_data = 0xffff;
2333				}
2334			}
2335		}
2336
2337		ifp->if_ipackets++;
2338
2339		NFE_UNLOCK(sc);
2340		(*ifp->if_input)(ifp, m);
2341		NFE_LOCK(sc);
2342	}
2343
2344	if (prog > 0)
2345		bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2346		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2347
2348	return (count > 0 ? 0 : EAGAIN);
2349}
2350
2351
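/*
 * Reclaim completed Tx descriptors.  Walk the ring from txq.next until a
 * descriptor that still has NFE_TX_VALID set (still owned by the chip) is
 * found; errors are counted and the mbuf is unloaded and freed at the last
 * fragment of each frame.
 */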
2352static void
2353nfe_txeof(struct nfe_softc *sc)
2354{
2355	struct ifnet *ifp = sc->nfe_ifp;
2356	struct nfe_desc32 *desc32;
2357	struct nfe_desc64 *desc64;
2358	struct nfe_tx_data *data = NULL;
2359	uint16_t flags;
2360	int cons, prog;
2361
2362	NFE_LOCK_ASSERT(sc);
2363
2364	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2365	    BUS_DMASYNC_POSTREAD);
2366
2367	prog = 0;
2368	for (cons = sc->txq.next; cons != sc->txq.cur;
2369	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
2370		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2371			desc64 = &sc->txq.desc64[cons];
2372			flags = le16toh(desc64->flags);
2373		} else {
2374			desc32 = &sc->txq.desc32[cons];
2375			flags = le16toh(desc32->flags);
2376		}
2377
2378		if (flags & NFE_TX_VALID)
2379			break;
2380
2381		prog++;
2382		sc->txq.queued--;
2383		data = &sc->txq.data[cons];
2384
2385		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2386			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2387				continue;
2388			if ((flags & NFE_TX_ERROR_V1) != 0) {
2389				device_printf(sc->nfe_dev,
2390				    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2391
2392				ifp->if_oerrors++;
2393			} else
2394				ifp->if_opackets++;
2395		} else {
2396			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2397				continue;
2398			if ((flags & NFE_TX_ERROR_V2) != 0) {
2399				device_printf(sc->nfe_dev,
2400				    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2401				ifp->if_oerrors++;
2402			} else
2403				ifp->if_opackets++;
2404		}
2405
2406		/* last fragment of the mbuf chain transmitted */
2407		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2408		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2409		    BUS_DMASYNC_POSTWRITE);
2410		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2411		m_freem(data->m);
2412		data->m = NULL;
2413	}
2414
2415	if (prog > 0) {
2416		sc->nfe_force_tx = 0;
2417		sc->txq.next = cons;
2418		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2419		if (sc->txq.queued == 0)
2420			sc->nfe_watchdog_timer = 0;
2421	}
2422}
2423
2424/*
2425 * This is a copy of ath_defrag() from ath(4).
2426 *
2427 * Defragment an mbuf chain, returning at most maxfrags separate
2428 * mbufs+clusters.  If this is not possible NULL is returned and
2429 * the original mbuf chain is left in its present (potentially
2430 * modified) state.  We use two techniques: collapsing consecutive
2431 * mbufs and replacing consecutive mbufs by a cluster.
2432 */
2433static struct mbuf *
2434nfe_defrag(struct mbuf *m0, int how, int maxfrags)
2435{
2436	struct mbuf *m, *n, *n2, **prev;
2437	u_int curfrags;
2438
2439	/*
2440	 * Calculate the current number of frags.
2441	 */
2442	curfrags = 0;
2443	for (m = m0; m != NULL; m = m->m_next)
2444		curfrags++;
2445	/*
2446	 * First, try to collapse mbufs.  Note that we always collapse
2447	 * towards the front so we don't need to deal with moving the
2448	 * pkthdr.  This may be suboptimal if the first mbuf has much
2449	 * less data than the following.
2450	 */
2451	m = m0;
2452again:
2453	for (;;) {
2454		n = m->m_next;
2455		if (n == NULL)
2456			break;
2457		if ((m->m_flags & M_RDONLY) == 0 &&
2458		    n->m_len < M_TRAILINGSPACE(m)) {
2459			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
2460				n->m_len);
2461			m->m_len += n->m_len;
2462			m->m_next = n->m_next;
2463			m_free(n);
2464			if (--curfrags <= maxfrags)
2465				return (m0);
2466		} else
2467			m = n;
2468	}
2469	KASSERT(maxfrags > 1,
2470		("maxfrags %u, but normal collapse failed", maxfrags));
2471	/*
2472	 * Collapse consecutive mbufs to a cluster.
2473	 */
2474	prev = &m0->m_next;		/* NB: not the first mbuf */
2475	while ((n = *prev) != NULL) {
2476		if ((n2 = n->m_next) != NULL &&
2477		    n->m_len + n2->m_len < MCLBYTES) {
2478			m = m_getcl(how, MT_DATA, 0);
2479			if (m == NULL)
2480				goto bad;
2481			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
2482			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
2483				n2->m_len);
2484			m->m_len = n->m_len + n2->m_len;
2485			m->m_next = n2->m_next;
2486			*prev = m;
2487			m_free(n);
2488			m_free(n2);
2489			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
2490				return (m0);
2491			/*
2492			 * Still not there, try the normal collapse
2493			 * again before we allocate another cluster.
2494			 */
2495			goto again;
2496		}
2497		prev = &n->m_next;
2498	}
2499	/*
2500	 * No place where we can collapse to a cluster; punt.
2501	 * This can occur if, for example, you request 2 frags
2502	 * but the packet requires that both be clusters (we
2503	 * never reallocate the first mbuf to avoid moving the
2504	 * packet header).
2505	 */
2506bad:
2507	return (NULL);
2508}
2509
2510
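/*
 * Map an outgoing mbuf chain onto Tx descriptors.  Chains with too many
 * fragments are defragmented first.  Checksum, TSO and VLAN information
 * goes into the first descriptor, whose valid bit is set only after the
 * rest of the chain has been built so the chip never sees a partial frame.
 */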
2511static int
2512nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2513{
2514	struct nfe_desc32 *desc32 = NULL;
2515	struct nfe_desc64 *desc64 = NULL;
2516	bus_dmamap_t map;
2517	bus_dma_segment_t segs[NFE_MAX_SCATTER];
2518	int error, i, nsegs, prod, si;
2519	uint32_t tso_segsz;
2520	uint16_t cflags, flags;
2521	struct mbuf *m;
2522
2523	prod = si = sc->txq.cur;
2524	map = sc->txq.data[prod].tx_data_map;
2525
2526	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2527	    &nsegs, BUS_DMA_NOWAIT);
2528	if (error == EFBIG) {
2529		m = nfe_defrag(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
2530		if (m == NULL) {
2531			m_freem(*m_head);
2532			*m_head = NULL;
2533			return (ENOBUFS);
2534		}
2535		*m_head = m;
2536		error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2537		    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2538		if (error != 0) {
2539			m_freem(*m_head);
2540			*m_head = NULL;
2541			return (ENOBUFS);
2542		}
2543	} else if (error != 0)
2544		return (error);
2545	if (nsegs == 0) {
2546		m_freem(*m_head);
2547		*m_head = NULL;
2548		return (EIO);
2549	}
2550
2551	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2552		bus_dmamap_unload(sc->txq.tx_data_tag, map);
2553		return (ENOBUFS);
2554	}
2555
2556	m = *m_head;
2557	cflags = flags = 0;
2558	tso_segsz = 0;
2559	if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2560		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2561			cflags |= NFE_TX_IP_CSUM;
2562		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2563			cflags |= NFE_TX_TCP_UDP_CSUM;
2564		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2565			cflags |= NFE_TX_TCP_UDP_CSUM;
2566	}
2567	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2568		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2569		    NFE_TX_TSO_SHIFT;
2570		cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2571		cflags |= NFE_TX_TSO;
2572	}
2573
2574	for (i = 0; i < nsegs; i++) {
2575		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2576			desc64 = &sc->txq.desc64[prod];
2577			desc64->physaddr[0] =
2578			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
2579			desc64->physaddr[1] =
2580			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
2581			desc64->vtag = 0;
2582			desc64->length = htole16(segs[i].ds_len - 1);
2583			desc64->flags = htole16(flags);
2584		} else {
2585			desc32 = &sc->txq.desc32[prod];
2586			desc32->physaddr =
2587			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
2588			desc32->length = htole16(segs[i].ds_len - 1);
2589			desc32->flags = htole16(flags);
2590		}
2591
2592		/*
2593		 * Setting of the valid bit in the first descriptor is
2594		 * deferred until the whole chain is fully set up.
2595		 */
2596		flags |= NFE_TX_VALID;
2597
2598		sc->txq.queued++;
2599		NFE_INC(prod, NFE_TX_RING_COUNT);
2600	}
2601
2602	/*
2603	 * the whole mbuf chain has been DMA mapped, fix last/first descriptor.
2604	 * csum flags, vtag and TSO belong to the first fragment only.
2605	 */
2606	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2607		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2608		desc64 = &sc->txq.desc64[si];
2609		if ((m->m_flags & M_VLANTAG) != 0)
2610			desc64->vtag = htole32(NFE_TX_VTAG |
2611			    m->m_pkthdr.ether_vtag);
2612		if (tso_segsz != 0) {
2613			/*
2614			 * XXX
2615			 * The following indicates the descriptor element
2616			 * is a 32bit quantity.
2617			 */
2618			desc64->length |= htole16((uint16_t)tso_segsz);
2619			desc64->flags |= htole16(tso_segsz >> 16);
2620		}
2621		/*
2622		 * finally, set the valid/checksum/TSO bit in the first
2623		 * descriptor.
2624		 */
2625		desc64->flags |= htole16(NFE_TX_VALID | cflags);
2626	} else {
2627		if (sc->nfe_flags & NFE_JUMBO_SUP)
2628			desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2629		else
2630			desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2631		desc32 = &sc->txq.desc32[si];
2632		if (tso_segsz != 0) {
2633			/*
2634			 * XXX
2635			 * The following indicates the descriptor element
2636			 * is a 32bit quantity.
2637			 */
2638			desc32->length |= htole16((uint16_t)tso_segsz);
2639			desc32->flags |= htole16(tso_segsz >> 16);
2640		}
2641		/*
2642		 * finally, set the valid/checksum/TSO bit in the first
2643		 * descriptor.
2644		 */
2645		desc32->flags |= htole16(NFE_TX_VALID | cflags);
2646	}
2647
2648	sc->txq.cur = prod;
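	/*
	 * The DMA map was taken from the first slot (si), but nfe_txeof()
	 * frees the mbuf at the chain's last descriptor; swap the maps so
	 * the loaded map stays with the mbuf.
	 */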
2649	prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2650	sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2651	sc->txq.data[prod].tx_data_map = map;
2652	sc->txq.data[prod].m = m;
2653
2654	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
2655
2656	return (0);
2657}
2658
2659
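/*
 * Program the Rx filter.  The chip takes an (address, mask) pair rather
 * than a hash table: addr keeps the bit values on which all subscribed
 * multicast addresses agree and mask flags those agreeing bit positions,
 * so the hardware presumably accepts any address matching addr on the bits
 * set in mask, a superset of the subscribed groups (an all-zero pair is
 * used for PROMISC/ALLMULTI).
 */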
2660static void
2661nfe_setmulti(struct nfe_softc *sc)
2662{
2663	struct ifnet *ifp = sc->nfe_ifp;
2664	struct ifmultiaddr *ifma;
2665	int i;
2666	uint32_t filter;
2667	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
2668	uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2669		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2670	};
2671
2672	NFE_LOCK_ASSERT(sc);
2673
2674	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2675		bzero(addr, ETHER_ADDR_LEN);
2676		bzero(mask, ETHER_ADDR_LEN);
2677		goto done;
2678	}
2679
2680	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
2681	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
2682
2683	IF_ADDR_LOCK(ifp);
2684	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2685		u_char *addrp;
2686
2687		if (ifma->ifma_addr->sa_family != AF_LINK)
2688			continue;
2689
2690		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2691		for (i = 0; i < ETHER_ADDR_LEN; i++) {
2692			u_int8_t mcaddr = addrp[i];
2693			addr[i] &= mcaddr;
2694			mask[i] &= ~mcaddr;
2695		}
2696	}
2697	IF_ADDR_UNLOCK(ifp);
2698
2699	for (i = 0; i < ETHER_ADDR_LEN; i++) {
2700		mask[i] |= addr[i];
2701	}
2702
2703done:
2704	addr[0] |= 0x01;	/* make sure multicast bit is set */
2705
2706	NFE_WRITE(sc, NFE_MULTIADDR_HI,
2707	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2708	NFE_WRITE(sc, NFE_MULTIADDR_LO,
2709	    addr[5] <<  8 | addr[4]);
2710	NFE_WRITE(sc, NFE_MULTIMASK_HI,
2711	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
2712	NFE_WRITE(sc, NFE_MULTIMASK_LO,
2713	    mask[5] <<  8 | mask[4]);
2714
2715	filter = NFE_READ(sc, NFE_RXFILTER);
2716	filter &= NFE_PFF_RX_PAUSE;
2717	filter |= NFE_RXFILTER_MAGIC;
2718	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2719	NFE_WRITE(sc, NFE_RXFILTER, filter);
2720}
2721
2722
2723static void
2724nfe_tx_task(void *arg, int pending)
2725{
2726	struct ifnet *ifp;
2727
2728	ifp = (struct ifnet *)arg;
2729	nfe_start(ifp);
2730}
2731
2732
2733static void
2734nfe_start(struct ifnet *ifp)
2735{
2736	struct nfe_softc *sc = ifp->if_softc;
2737	struct mbuf *m0;
2738	int enq;
2739
2740	NFE_LOCK(sc);
2741
2742	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2743	    IFF_DRV_RUNNING || sc->nfe_link == 0) {
2744		NFE_UNLOCK(sc);
2745		return;
2746	}
2747
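	/*
	 * Dequeue and map frames until the send queue is empty or the Tx
	 * ring fills up; on an encapsulation failure the frame is put back
	 * and IFF_DRV_OACTIVE is set until descriptors are reclaimed.
	 */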
2748	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
2749		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2750		if (m0 == NULL)
2751			break;
2752
2753		if (nfe_encap(sc, &m0) != 0) {
2754			if (m0 == NULL)
2755				break;
2756			IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2757			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2758			break;
2759		}
2760		enq++;
2761		ETHER_BPF_MTAP(ifp, m0);
2762	}
2763
2764	if (enq > 0) {
2765		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2766		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2767
2768		/* kick Tx */
2769		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2770
2771		/*
2772		 * Set a timeout in case the chip goes out to lunch.
2773		 */
2774		sc->nfe_watchdog_timer = 5;
2775	}
2776
2777	NFE_UNLOCK(sc);
2778}
2779
2780
2781static void
2782nfe_watchdog(struct ifnet *ifp)
2783{
2784	struct nfe_softc *sc = ifp->if_softc;
2785
2786	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2787		return;
2788
2789	/* Check if we've lost Tx completion interrupt. */
2790	nfe_txeof(sc);
2791	if (sc->txq.queued == 0) {
2792		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2793		    "-- recovering\n");
2794		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2795			taskqueue_enqueue_fast(taskqueue_fast,
2796			    &sc->nfe_tx_task);
2797		return;
2798	}
2799	/* Check if we've lost start Tx command. */
2800	sc->nfe_force_tx++;
2801	if (sc->nfe_force_tx <= 3) {
2802		/*
2803		 * If lost start Tx commands turn out to be the usual cause of
2804		 * watchdog timeouts, this kick should be moved into nfe_txeof().
2805		 */
2806		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2807		return;
2808	}
2809	sc->nfe_force_tx = 0;
2810
2811	if_printf(ifp, "watchdog timeout\n");
2812
2813	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2814	ifp->if_oerrors++;
2815	nfe_init_locked(sc);
2816}
2817
2818
2819static void
2820nfe_init(void *xsc)
2821{
2822	struct nfe_softc *sc = xsc;
2823
2824	NFE_LOCK(sc);
2825	nfe_init_locked(sc);
2826	NFE_UNLOCK(sc);
2827}
2828
2829
2830static void
2831nfe_init_locked(void *xsc)
2832{
2833	struct nfe_softc *sc = xsc;
2834	struct ifnet *ifp = sc->nfe_ifp;
2835	struct mii_data *mii;
2836	uint32_t val;
2837	int error;
2838
2839	NFE_LOCK_ASSERT(sc);
2840
2841	mii = device_get_softc(sc->nfe_miibus);
2842
2843	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2844		return;
2845
2846	nfe_stop(ifp);
2847
2848	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
2849
2850	nfe_init_tx_ring(sc, &sc->txq);
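	/*
	 * Frames larger than a standard cluster are handled through the
	 * jumbo Rx ring; otherwise the regular MCLBYTES-sized ring is used.
	 */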
2851	if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2852		error = nfe_init_jrx_ring(sc, &sc->jrxq);
2853	else
2854		error = nfe_init_rx_ring(sc, &sc->rxq);
2855	if (error != 0) {
2856		device_printf(sc->nfe_dev,
2857		    "initialization failed: no memory for rx buffers\n");
2858		nfe_stop(ifp);
2859		return;
2860	}
2861
2862	val = 0;
2863	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2864		val |= NFE_MAC_ADDR_INORDER;
2865	NFE_WRITE(sc, NFE_TX_UNK, val);
2866	NFE_WRITE(sc, NFE_STATUS, 0);
2867
2868	if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2869		NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2870
2871	sc->rxtxctl = NFE_RXTX_BIT2;
2872	if (sc->nfe_flags & NFE_40BIT_ADDR)
2873		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2874	else if (sc->nfe_flags & NFE_JUMBO_SUP)
2875		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2876
2877	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2878		sc->rxtxctl |= NFE_RXTX_RXCSUM;
2879	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2880		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2881
2882	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2883	DELAY(10);
2884	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2885
2886	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2887		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2888	else
2889		NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2890
2891	NFE_WRITE(sc, NFE_SETUP_R6, 0);
2892
2893	/* set MAC address */
2894	nfe_set_macaddr(sc, IF_LLADDR(ifp));
2895
2896	/* tell MAC where rings are in memory */
2897	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2898		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2899		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
2900		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2901		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
2902	} else {
2903		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2904		    NFE_ADDR_HI(sc->rxq.physaddr));
2905		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2906		    NFE_ADDR_LO(sc->rxq.physaddr));
2907	}
2908	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2909	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
2910
2911	NFE_WRITE(sc, NFE_RING_SIZE,
2912	    (NFE_RX_RING_COUNT - 1) << 16 |
2913	    (NFE_TX_RING_COUNT - 1));
2914
2915	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2916
2917	/* force MAC to wakeup */
2918	val = NFE_READ(sc, NFE_PWR_STATE);
2919	if ((val & NFE_PWR_WAKEUP) == 0)
2920		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2921	DELAY(10);
2922	val = NFE_READ(sc, NFE_PWR_STATE);
2923	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2924
2925#if 1
2926	/* configure interrupts coalescing/mitigation */
2927	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2928#else
2929	/* no interrupt mitigation: one interrupt per packet */
2930	NFE_WRITE(sc, NFE_IMTIMER, 970);
2931#endif
2932
2933	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2934	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2935	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2936
2937	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
2938	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2939
2940	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2941	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
2942
2943	sc->rxtxctl &= ~NFE_RXTX_BIT2;
2944	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2945	DELAY(10);
2946	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2947
2948	/* set Rx filter */
2949	nfe_setmulti(sc);
2950
2951	/* enable Rx */
2952	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2953
2954	/* enable Tx */
2955	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2956
2957	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2958
2959#ifdef DEVICE_POLLING
2960	if (ifp->if_capenable & IFCAP_POLLING)
2961		nfe_disable_intr(sc);
2962	else
2963#endif
2964	nfe_set_intr(sc);
2965	nfe_enable_intr(sc); /* enable interrupts */
2966
2967	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2968	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2969
2970	sc->nfe_link = 0;
2971	mii_mediachg(mii);
2972
2973	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2974}
2975
2976
2977static void
2978nfe_stop(struct ifnet *ifp)
2979{
2980	struct nfe_softc *sc = ifp->if_softc;
2981	struct nfe_rx_ring *rx_ring;
2982	struct nfe_jrx_ring *jrx_ring;
2983	struct nfe_tx_ring *tx_ring;
2984	struct nfe_rx_data *rdata;
2985	struct nfe_tx_data *tdata;
2986	int i;
2987
2988	NFE_LOCK_ASSERT(sc);
2989
2990	sc->nfe_watchdog_timer = 0;
2991	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2992
2993	callout_stop(&sc->nfe_stat_ch);
2994
2995	/* abort Tx */
2996	NFE_WRITE(sc, NFE_TX_CTL, 0);
2997
2998	/* disable Rx */
2999	NFE_WRITE(sc, NFE_RX_CTL, 0);
3000
3001	/* disable interrupts */
3002	nfe_disable_intr(sc);
3003
3004	sc->nfe_link = 0;
3005
3006	/* free Rx and Tx mbufs still in the queues. */
3007	rx_ring = &sc->rxq;
3008	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
3009		rdata = &rx_ring->data[i];
3010		if (rdata->m != NULL) {
3011			bus_dmamap_sync(rx_ring->rx_data_tag,
3012			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
3013			bus_dmamap_unload(rx_ring->rx_data_tag,
3014			    rdata->rx_data_map);
3015			m_freem(rdata->m);
3016			rdata->m = NULL;
3017		}
3018	}
3019
3020	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
3021		jrx_ring = &sc->jrxq;
3022		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
3023			rdata = &jrx_ring->jdata[i];
3024			if (rdata->m != NULL) {
3025				bus_dmamap_sync(jrx_ring->jrx_data_tag,
3026				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
3027				bus_dmamap_unload(jrx_ring->jrx_data_tag,
3028				    rdata->rx_data_map);
3029				m_freem(rdata->m);
3030				rdata->m = NULL;
3031			}
3032		}
3033	}
3034
3035	tx_ring = &sc->txq;
3036	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
3037		tdata = &tx_ring->data[i];
3038		if (tdata->m != NULL) {
3039			bus_dmamap_sync(tx_ring->tx_data_tag,
3040			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
3041			bus_dmamap_unload(tx_ring->tx_data_tag,
3042			    tdata->tx_data_map);
3043			m_freem(tdata->m);
3044			tdata->m = NULL;
3045		}
3046	}
3047}
3048
3049
3050static int
3051nfe_ifmedia_upd(struct ifnet *ifp)
3052{
3053	struct nfe_softc *sc = ifp->if_softc;
3054	struct mii_data *mii;
3055
3056	NFE_LOCK(sc);
3057	mii = device_get_softc(sc->nfe_miibus);
3058	mii_mediachg(mii);
3059	NFE_UNLOCK(sc);
3060
3061	return (0);
3062}
3063
3064
3065static void
3066nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3067{
3068	struct nfe_softc *sc;
3069	struct mii_data *mii;
3070
3071	sc = ifp->if_softc;
3072
3073	NFE_LOCK(sc);
3074	mii = device_get_softc(sc->nfe_miibus);
3075	mii_pollstat(mii);
3076	NFE_UNLOCK(sc);
3077
3078	ifmr->ifm_active = mii->mii_media_active;
3079	ifmr->ifm_status = mii->mii_media_status;
3080}
3081
3082
3083void
3084nfe_tick(void *xsc)
3085{
3086	struct nfe_softc *sc;
3087	struct mii_data *mii;
3088	struct ifnet *ifp;
3089
3090	sc = (struct nfe_softc *)xsc;
3091
3092	NFE_LOCK_ASSERT(sc);
3093
3094	ifp = sc->nfe_ifp;
3095
3096	mii = device_get_softc(sc->nfe_miibus);
3097	mii_tick(mii);
3098	nfe_watchdog(ifp);
3099	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
3100}
3101
3102
3103static void
3104nfe_shutdown(device_t dev)
3105{
3106	struct nfe_softc *sc;
3107	struct ifnet *ifp;
3108
3109	sc = device_get_softc(dev);
3110
3111	NFE_LOCK(sc);
3112	ifp = sc->nfe_ifp;
3113	nfe_stop(ifp);
3114	/* nfe_reset(sc); */
3115	NFE_UNLOCK(sc);
3116}
3117
3118
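/*
 * Read the station address from the MAC address registers.  Chips flagged
 * NFE_CORRECT_MACADDR store it in the same byte order nfe_set_macaddr()
 * writes it; the others store it byte-reversed.
 */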
3119static void
3120nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
3121{
3122	uint32_t val;
3123
3124	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
3125		val = NFE_READ(sc, NFE_MACADDR_LO);
3126		addr[0] = (val >> 8) & 0xff;
3127		addr[1] = (val & 0xff);
3128
3129		val = NFE_READ(sc, NFE_MACADDR_HI);
3130		addr[2] = (val >> 24) & 0xff;
3131		addr[3] = (val >> 16) & 0xff;
3132		addr[4] = (val >>  8) & 0xff;
3133		addr[5] = (val & 0xff);
3134	} else {
3135		val = NFE_READ(sc, NFE_MACADDR_LO);
3136		addr[5] = (val >> 8) & 0xff;
3137		addr[4] = (val & 0xff);
3138
3139		val = NFE_READ(sc, NFE_MACADDR_HI);
3140		addr[3] = (val >> 24) & 0xff;
3141		addr[2] = (val >> 16) & 0xff;
3142		addr[1] = (val >>  8) & 0xff;
3143		addr[0] = (val & 0xff);
3144	}
3145}
3146
3147
3148static void
3149nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
3150{
3151
3152	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
3153	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
3154	    addr[1] << 8 | addr[0]);
3155}
3156
3157
3158/*
3159 * Map a single buffer address.
3160 */
3161
3162static void
3163nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3164{
3165	struct nfe_dmamap_arg *ctx;
3166
3167	if (error != 0)
3168		return;
3169
3170	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
3171
3172	ctx = (struct nfe_dmamap_arg *)arg;
3173	ctx->nfe_busaddr = segs[0].ds_addr;
3174}
3175
3176
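/*
 * Sysctl helper: accept a new integer value only if it lies within
 * [low, high].
 */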
3177static int
3178sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3179{
3180	int error, value;
3181
3182	if (!arg1)
3183		return (EINVAL);
3184	value = *(int *)arg1;
3185	error = sysctl_handle_int(oidp, &value, 0, req);
3186	if (error || !req->newptr)
3187		return (error);
3188	if (value < low || value > high)
3189		return (EINVAL);
3190	*(int *)arg1 = value;
3191
3192	return (0);
3193}
3194
3195
3196static int
3197sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3198{
3199
3200	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
3201	    NFE_PROC_MAX));
3202}
3203