1/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/
2
3/*-
4 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
22
23#include <sys/cdefs.h>
24__FBSDID("$FreeBSD: head/sys/dev/nfe/if_nfe.c 183509 2008-10-01 00:17:54Z yongari $");
25
26#ifdef HAVE_KERNEL_OPTION_HEADERS
27#include "opt_device_polling.h"
28#endif
29
30#include <sys/param.h>
31#include <sys/endian.h>
32#include <sys/systm.h>
33#include <sys/sockio.h>
34#include <sys/mbuf.h>
35#include <sys/malloc.h>
36#include <sys/module.h>
37#include <sys/kernel.h>
38#include <sys/queue.h>
39#include <sys/socket.h>
40#include <sys/sysctl.h>
41#include <sys/taskqueue.h>
42
43#include <net/if.h>
44#include <net/if_arp.h>
45#include <net/ethernet.h>
46#include <net/if_dl.h>
47#include <net/if_media.h>
48#include <net/if_types.h>
49#include <net/if_vlan_var.h>
50
51#include <net/bpf.h>
52
53#include <machine/bus.h>
54#include <machine/resource.h>
55#include <sys/bus.h>
56#include <sys/rman.h>
57
58#include <dev/mii/mii.h>
59#include <dev/mii/miivar.h>
60
61#include <dev/pci/pcireg.h>
62#include <dev/pci/pcivar.h>
63
64#include <dev/nfe/if_nfereg.h>
65#include <dev/nfe/if_nfevar.h>
66
67MODULE_DEPEND(nfe, pci, 1, 1, 1);
68MODULE_DEPEND(nfe, ether, 1, 1, 1);
69MODULE_DEPEND(nfe, miibus, 1, 1, 1);
70
71/* "device miibus" required.  See GENERIC if you get errors here. */
72#include "miibus_if.h"
73
74static int  nfe_probe(device_t);
75static int  nfe_attach(device_t);
76static int  nfe_detach(device_t);
77static int  nfe_suspend(device_t);
78static int  nfe_resume(device_t);
79static int nfe_shutdown(device_t);
80static void nfe_power(struct nfe_softc *);
81static int  nfe_miibus_readreg(device_t, int, int);
82static int  nfe_miibus_writereg(device_t, int, int, int);
83static void nfe_miibus_statchg(device_t);
84static void nfe_link_task(void *, int);
85static void nfe_set_intr(struct nfe_softc *);
86static __inline void nfe_enable_intr(struct nfe_softc *);
87static __inline void nfe_disable_intr(struct nfe_softc *);
88static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
89static void nfe_alloc_msix(struct nfe_softc *, int);
90static int nfe_intr(void *);
91static void nfe_int_task(void *, int);
92static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
93static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
94static int nfe_newbuf(struct nfe_softc *, int);
95static int nfe_jnewbuf(struct nfe_softc *, int);
96static int  nfe_rxeof(struct nfe_softc *, int);
97static int  nfe_jrxeof(struct nfe_softc *, int);
98static void nfe_txeof(struct nfe_softc *);
99static int  nfe_encap(struct nfe_softc *, struct mbuf **);
100static void nfe_setmulti(struct nfe_softc *);
101static void nfe_tx_task(void *, int);
102static void nfe_start(struct ifnet *);
103static void nfe_watchdog(struct ifnet *);
104static void nfe_init(void *);
105static void nfe_init_locked(void *);
106static void nfe_stop(struct ifnet *);
107static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
108static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
109static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
110static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
111static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
112static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
113static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
114static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
115static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
116static int  nfe_ifmedia_upd(struct ifnet *);
117static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
118static void nfe_tick(void *);
119static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
120static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
121static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
122
123static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
124static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
125
126#ifdef NFE_DEBUG
127static int nfedebug = 0;
128#define	DPRINTF(sc, ...)	do {				\
129	if (nfedebug)						\
130		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
131} while (0)
132#define	DPRINTFN(sc, n, ...)	do {				\
133	if (nfedebug >= (n))					\
134		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
135} while (0)
136#else
137#define	DPRINTF(sc, ...)
138#define	DPRINTFN(sc, n, ...)
139#endif
140
141#define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
142#define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
143#define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
144
145/* Tunables. */
146static int msi_disable = 0;
147static int msix_disable = 0;
148static int jumbo_disable = 0;
149TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
150TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
151TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
152
153static device_method_t nfe_methods[] = {
154	/* Device interface */
155	DEVMETHOD(device_probe,		nfe_probe),
156	DEVMETHOD(device_attach,	nfe_attach),
157	DEVMETHOD(device_detach,	nfe_detach),
158	DEVMETHOD(device_suspend,	nfe_suspend),
159	DEVMETHOD(device_resume,	nfe_resume),
160	DEVMETHOD(device_shutdown,	nfe_shutdown),
161
162	/* bus interface */
163	DEVMETHOD(bus_print_child,	bus_generic_print_child),
164	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
165
166	/* MII interface */
167	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
168	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
169	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),
170
171	{ NULL, NULL }
172};
173
174static driver_t nfe_driver = {
175	"nfe",
176	nfe_methods,
177	sizeof(struct nfe_softc)
178};
179
180static devclass_t nfe_devclass;
181
182DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
183DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
184
185static struct nfe_type nfe_devs[] = {
186	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
187	    "NVIDIA nForce MCP Networking Adapter"},
188	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
189	    "NVIDIA nForce2 MCP2 Networking Adapter"},
190	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
191	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
192	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
193	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
194	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
195	    "NVIDIA nForce3 MCP3 Networking Adapter"},
196	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
197	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
198	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
199	    "NVIDIA nForce3 MCP7 Networking Adapter"},
200	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
201	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
202	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
203	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
204	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
205	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
206	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
207	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
208	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
209	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
210	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
211	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
212	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
213	    "NVIDIA nForce MCP55 Networking Adapter"},
214	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
215	    "NVIDIA nForce MCP55 Networking Adapter"},
216	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
217	    "NVIDIA nForce MCP61 Networking Adapter"},
218	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
219	    "NVIDIA nForce MCP61 Networking Adapter"},
220	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
221	    "NVIDIA nForce MCP61 Networking Adapter"},
222	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
223	    "NVIDIA nForce MCP61 Networking Adapter"},
224	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
225	    "NVIDIA nForce MCP65 Networking Adapter"},
226	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
227	    "NVIDIA nForce MCP65 Networking Adapter"},
228	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
229	    "NVIDIA nForce MCP65 Networking Adapter"},
230	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
231	    "NVIDIA nForce MCP65 Networking Adapter"},
232	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
233	    "NVIDIA nForce MCP67 Networking Adapter"},
234	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
235	    "NVIDIA nForce MCP67 Networking Adapter"},
236	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
237	    "NVIDIA nForce MCP67 Networking Adapter"},
238	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
239	    "NVIDIA nForce MCP67 Networking Adapter"},
240	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
241	    "NVIDIA nForce MCP73 Networking Adapter"},
242	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
243	    "NVIDIA nForce MCP73 Networking Adapter"},
244	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
245	    "NVIDIA nForce MCP73 Networking Adapter"},
246	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
247	    "NVIDIA nForce MCP73 Networking Adapter"},
248	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
249	    "NVIDIA nForce MCP77 Networking Adapter"},
250	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
251	    "NVIDIA nForce MCP77 Networking Adapter"},
252	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
253	    "NVIDIA nForce MCP77 Networking Adapter"},
254	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
255	    "NVIDIA nForce MCP77 Networking Adapter"},
256	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
257	    "NVIDIA nForce MCP79 Networking Adapter"},
258	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
259	    "NVIDIA nForce MCP79 Networking Adapter"},
260	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
261	    "NVIDIA nForce MCP79 Networking Adapter"},
262	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
263	    "NVIDIA nForce MCP79 Networking Adapter"},
264	{0, 0, NULL}
265};
266
267
268/* Probe for supported hardware IDs */
269static int
270nfe_probe(device_t dev)
271{
272	struct nfe_type *t;
273
274	t = nfe_devs;
275	/* Check for matching PCI device IDs */
276	while (t->name != NULL) {
277		if ((pci_get_vendor(dev) == t->vid_id) &&
278		    (pci_get_device(dev) == t->dev_id)) {
279			device_set_desc(dev, t->name);
280			return (BUS_PROBE_DEFAULT);
281		}
282		t++;
283	}
284
285	return (ENXIO);
286}
287
288static void
289nfe_alloc_msix(struct nfe_softc *sc, int count)
290{
291	int rid;
292
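	/* The MSI-X table and PBA are mapped through BAR 2 and BAR 3. */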
293	rid = PCIR_BAR(2);
294	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
295	    &rid, RF_ACTIVE);
296	if (sc->nfe_msix_res == NULL) {
297		device_printf(sc->nfe_dev,
298		    "couldn't allocate MSIX table resource\n");
299		return;
300	}
301	rid = PCIR_BAR(3);
302	sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
303	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
304	if (sc->nfe_msix_pba_res == NULL) {
305		device_printf(sc->nfe_dev,
306		    "couldn't allocate MSIX PBA resource\n");
307		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
308		    sc->nfe_msix_res);
309		sc->nfe_msix_res = NULL;
310		return;
311	}
312
313	if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
314		if (count == NFE_MSI_MESSAGES) {
315			if (bootverbose)
316				device_printf(sc->nfe_dev,
317				    "Using %d MSIX messages\n", count);
318			sc->nfe_msix = 1;
319		} else {
320			if (bootverbose)
321				device_printf(sc->nfe_dev,
322				    "couldn't allocate MSIX\n");
323			pci_release_msi(sc->nfe_dev);
324			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
325			    PCIR_BAR(3), sc->nfe_msix_pba_res);
326			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
327			    PCIR_BAR(2), sc->nfe_msix_res);
328			sc->nfe_msix_pba_res = NULL;
329			sc->nfe_msix_res = NULL;
330		}
331	}
332}
333
334static int
335nfe_attach(device_t dev)
336{
337	struct nfe_softc *sc;
338	struct ifnet *ifp;
339	bus_addr_t dma_addr_max;
340	int error = 0, i, msic, reg, rid;
341
342	sc = device_get_softc(dev);
343	sc->nfe_dev = dev;
344
345	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
346	    MTX_DEF);
347	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
348	TASK_INIT(&sc->nfe_link_task, 0, nfe_link_task, sc);
349
350	pci_enable_busmaster(dev);
351
352	rid = PCIR_BAR(0);
353	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
354	    RF_ACTIVE);
355	if (sc->nfe_res[0] == NULL) {
356		device_printf(dev, "couldn't map memory resources\n");
357		mtx_destroy(&sc->nfe_mtx);
358		return (ENXIO);
359	}
360
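	/*
	 * If a PCI Express capability is present, raise the maximum read
	 * request size (Device Control, cap + 0x08) to 4096 bytes and warn
	 * when the negotiated link width (Link Status, cap + 0x12) differs
	 * from the maximum advertised in Link Capabilities (cap + 0x0c).
	 */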
361	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
362		uint16_t v, width;
363
364		v = pci_read_config(dev, reg + 0x08, 2);
365		/* Change max. read request size to 4096. */
366		v &= ~(7 << 12);
367		v |= (5 << 12);
368		pci_write_config(dev, reg + 0x08, v, 2);
369
370		v = pci_read_config(dev, reg + 0x0c, 2);
371		/* link capability */
372		v = (v >> 4) & 0x0f;
373		width = pci_read_config(dev, reg + 0x12, 2);
374		/* negotiated link width */
375		width = (width >> 4) & 0x3f;
376		if (v != width)
377			device_printf(sc->nfe_dev,
378			    "warning, negotiated width of link(x%d) != "
379			    "max. width of link(x%d)\n", width, v);
380	}
381
382	/* Allocate interrupt */
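	/* Try MSI-X first, then plain MSI, then fall back to a shared INTx line. */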
383	if (msix_disable == 0 || msi_disable == 0) {
384		if (msix_disable == 0 &&
385		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
386			nfe_alloc_msix(sc, msic);
387		if (msi_disable == 0 && sc->nfe_msix == 0 &&
388		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
389		    pci_alloc_msi(dev, &msic) == 0) {
390			if (msic == NFE_MSI_MESSAGES) {
391				if (bootverbose)
392					device_printf(dev,
393					    "Using %d MSI messages\n", msic);
394				sc->nfe_msi = 1;
395			} else
396				pci_release_msi(dev);
397		}
398	}
399
400	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
401		rid = 0;
402		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
403		    RF_SHAREABLE | RF_ACTIVE);
404		if (sc->nfe_irq[0] == NULL) {
405			device_printf(dev, "couldn't allocate IRQ resources\n");
406			error = ENXIO;
407			goto fail;
408		}
409	} else {
410		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
411			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
412			    SYS_RES_IRQ, &rid, RF_ACTIVE);
413			if (sc->nfe_irq[i] == NULL) {
414				device_printf(dev,
415				    "couldn't allocate IRQ resources for "
416				    "message %d\n", rid);
417				error = ENXIO;
418				goto fail;
419			}
420		}
421		/* Map interrupts to vector 0. */
422		if (sc->nfe_msix != 0) {
423			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
424			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
425		} else if (sc->nfe_msi != 0) {
426			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
427			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
428		}
429	}
430
431	/* Set IRQ status/mask register. */
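	/*
	 * MSI-X uses a separate interrupt status register; MSI uses its own
	 * mask register and a per-vector enable bit for vector 0.
	 */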
432	sc->nfe_irq_status = NFE_IRQ_STATUS;
433	sc->nfe_irq_mask = NFE_IRQ_MASK;
434	sc->nfe_intrs = NFE_IRQ_WANTED;
435	sc->nfe_nointrs = 0;
436	if (sc->nfe_msix != 0) {
437		sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
438		sc->nfe_nointrs = NFE_IRQ_WANTED;
439	} else if (sc->nfe_msi != 0) {
440		sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
441		sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
442	}
443
444	sc->nfe_devid = pci_get_device(dev);
445	sc->nfe_revid = pci_get_revid(dev);
446	sc->nfe_flags = 0;
447
448	switch (sc->nfe_devid) {
449	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
450	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
451	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
452	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
453		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
454		break;
455	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
456	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
457		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
458		break;
459	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
460	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
461	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
462	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
463		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
464		break;
465	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
466	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
467		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
468		    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL;
469		break;
470
471	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
472	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
473	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
474	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
475	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
476	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
477	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
478	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
479	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
480	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
481	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
482	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
483		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
484		    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL;
485		break;
486	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
487	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
488	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
489	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
490		/* XXX flow control */
491		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
492		    NFE_CORRECT_MACADDR;
493		break;
494	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
495	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
496	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
497	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
498		/* XXX flow control */
499		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
500		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR;
501		break;
502	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
503	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
504	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
505	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
506		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
507		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL;
508		break;
509	}
510
511	nfe_power(sc);
512	/* Check whether the MAC address is stored in reversed byte order. */
513	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
514		sc->nfe_flags |= NFE_CORRECT_MACADDR;
515	nfe_get_macaddr(sc, sc->eaddr);
516	/*
517	 * Allocate the parent bus DMA tag appropriate for PCI.
518	 */
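	/*
	 * Controllers with NFE_40BIT_ADDR can DMA above 4GB; everything
	 * else is restricted to 32-bit bus addresses.
	 */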
519	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
520	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
521		dma_addr_max = NFE_DMA_MAXADDR;
522	error = bus_dma_tag_create(
523	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
524	    1, 0,				/* alignment, boundary */
525	    dma_addr_max,			/* lowaddr */
526	    BUS_SPACE_MAXADDR,			/* highaddr */
527	    NULL, NULL,				/* filter, filterarg */
528	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
529	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
530	    0,					/* flags */
531	    NULL, NULL,				/* lockfunc, lockarg */
532	    &sc->nfe_parent_tag);
533	if (error)
534		goto fail;
535
536	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
537	if (ifp == NULL) {
538		device_printf(dev, "can not if_alloc()\n");
539		error = ENOSPC;
540		goto fail;
541	}
542	TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp);
543
544	/*
545	 * Allocate Tx and Rx rings.
546	 */
547	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
548		goto fail;
549
550	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
551		goto fail;
552
553	nfe_alloc_jrx_ring(sc, &sc->jrxq);
554
555	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
556	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
557	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
558	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
559	    "max number of Rx events to process");
560
561	sc->nfe_process_limit = NFE_PROC_DEFAULT;
562	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
563	    "process_limit", &sc->nfe_process_limit);
564	if (error == 0) {
565		if (sc->nfe_process_limit < NFE_PROC_MIN ||
566		    sc->nfe_process_limit > NFE_PROC_MAX) {
567			device_printf(dev, "process_limit value out of range; "
568			    "using default: %d\n", NFE_PROC_DEFAULT);
569			sc->nfe_process_limit = NFE_PROC_DEFAULT;
570		}
571	}
572
573	ifp->if_softc = sc;
574	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
575	ifp->if_mtu = ETHERMTU;
576	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
577	ifp->if_ioctl = nfe_ioctl;
578	ifp->if_start = nfe_start;
579	ifp->if_hwassist = 0;
580	ifp->if_capabilities = 0;
581	ifp->if_watchdog = NULL;
582	ifp->if_init = nfe_init;
583	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
584	ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
585	IFQ_SET_READY(&ifp->if_snd);
586
587	if (sc->nfe_flags & NFE_HW_CSUM) {
588		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
589		ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
590	}
591	ifp->if_capenable = ifp->if_capabilities;
592
593	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
594	/* VLAN capability setup. */
595	ifp->if_capabilities |= IFCAP_VLAN_MTU;
596	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
597		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
598		if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
599			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
600	}
601	ifp->if_capenable = ifp->if_capabilities;
602
603	/*
604	 * Tell the upper layer(s) we support long frames.
605	 * Must appear after the call to ether_ifattach() because
606	 * ether_ifattach() sets ifi_hdrlen to the default value.
607	 */
608	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
609
610#ifdef DEVICE_POLLING
611	ifp->if_capabilities |= IFCAP_POLLING;
612#endif
613
614	/* Do MII setup */
615	if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd,
616	    nfe_ifmedia_sts)) {
617		device_printf(dev, "MII without any phy!\n");
618		error = ENXIO;
619		goto fail;
620	}
621	ether_ifattach(ifp, sc->eaddr);
622
623	TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
624	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
625	    taskqueue_thread_enqueue, &sc->nfe_tq);
626	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
627	    device_get_nameunit(sc->nfe_dev));
628	error = 0;
629	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
630		error = bus_setup_intr(dev, sc->nfe_irq[0],
631		    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
632		    &sc->nfe_intrhand[0]);
633	} else {
634		for (i = 0; i < NFE_MSI_MESSAGES; i++) {
635			error = bus_setup_intr(dev, sc->nfe_irq[i],
636			    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
637			    &sc->nfe_intrhand[i]);
638			if (error != 0)
639				break;
640		}
641	}
642	if (error) {
643		device_printf(dev, "couldn't set up irq\n");
644		taskqueue_free(sc->nfe_tq);
645		sc->nfe_tq = NULL;
646		ether_ifdetach(ifp);
647		goto fail;
648	}
649
650fail:
651	if (error)
652		nfe_detach(dev);
653
654	return (error);
655}
656
657
658static int
659nfe_detach(device_t dev)
660{
661	struct nfe_softc *sc;
662	struct ifnet *ifp;
663	uint8_t eaddr[ETHER_ADDR_LEN];
664	int i, rid;
665
666	sc = device_get_softc(dev);
667	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
668	ifp = sc->nfe_ifp;
669
670#ifdef DEVICE_POLLING
671	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
672		ether_poll_deregister(ifp);
673#endif
674	if (device_is_attached(dev)) {
675		NFE_LOCK(sc);
676		nfe_stop(ifp);
677		ifp->if_flags &= ~IFF_UP;
678		NFE_UNLOCK(sc);
679		callout_drain(&sc->nfe_stat_ch);
680		taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task);
681		taskqueue_drain(taskqueue_swi, &sc->nfe_link_task);
682		ether_ifdetach(ifp);
683	}
684
685	if (ifp) {
686		/* Restore the Ethernet address in the chip's original byte order. */
687		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
688			for (i = 0; i < ETHER_ADDR_LEN; i++) {
689				eaddr[i] = sc->eaddr[5 - i];
690			}
691		} else
692			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
693		nfe_set_macaddr(sc, eaddr);
694		if_free(ifp);
695	}
696	if (sc->nfe_miibus)
697		device_delete_child(dev, sc->nfe_miibus);
698	bus_generic_detach(dev);
699	if (sc->nfe_tq != NULL) {
700		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
701		taskqueue_free(sc->nfe_tq);
702		sc->nfe_tq = NULL;
703	}
704
705	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
706		if (sc->nfe_intrhand[i] != NULL) {
707			bus_teardown_intr(dev, sc->nfe_irq[i],
708			    sc->nfe_intrhand[i]);
709			sc->nfe_intrhand[i] = NULL;
710		}
711	}
712
713	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
714		if (sc->nfe_irq[0] != NULL)
715			bus_release_resource(dev, SYS_RES_IRQ, 0,
716			    sc->nfe_irq[0]);
717	} else {
718		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
719			if (sc->nfe_irq[i] != NULL) {
720				bus_release_resource(dev, SYS_RES_IRQ, rid,
721				    sc->nfe_irq[i]);
722				sc->nfe_irq[i] = NULL;
723			}
724		}
725		pci_release_msi(dev);
726	}
727	if (sc->nfe_msix_pba_res != NULL) {
728		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
729		    sc->nfe_msix_pba_res);
730		sc->nfe_msix_pba_res = NULL;
731	}
732	if (sc->nfe_msix_res != NULL) {
733		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
734		    sc->nfe_msix_res);
735		sc->nfe_msix_res = NULL;
736	}
737	if (sc->nfe_res[0] != NULL) {
738		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
739		    sc->nfe_res[0]);
740		sc->nfe_res[0] = NULL;
741	}
742
743	nfe_free_tx_ring(sc, &sc->txq);
744	nfe_free_rx_ring(sc, &sc->rxq);
745	nfe_free_jrx_ring(sc, &sc->jrxq);
746
747	if (sc->nfe_parent_tag) {
748		bus_dma_tag_destroy(sc->nfe_parent_tag);
749		sc->nfe_parent_tag = NULL;
750	}
751
752	mtx_destroy(&sc->nfe_mtx);
753
754	return (0);
755}
756
757
758static int
759nfe_suspend(device_t dev)
760{
761	struct nfe_softc *sc;
762
763	sc = device_get_softc(dev);
764
765	NFE_LOCK(sc);
766	nfe_stop(sc->nfe_ifp);
767	sc->nfe_suspended = 1;
768	NFE_UNLOCK(sc);
769
770	return (0);
771}
772
773
774static int
775nfe_resume(device_t dev)
776{
777	struct nfe_softc *sc;
778	struct ifnet *ifp;
779
780	sc = device_get_softc(dev);
781
782	NFE_LOCK(sc);
783	ifp = sc->nfe_ifp;
784	if (ifp->if_flags & IFF_UP)
785		nfe_init_locked(sc);
786	sc->nfe_suspended = 0;
787	NFE_UNLOCK(sc);
788
789	return (0);
790}
791
792
793/* Take the PHY/NIC out of power-down mode; sequence from the Linux driver. */
794static void
795nfe_power(struct nfe_softc *sc)
796{
797	uint32_t pwr;
798
799	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
800		return;
801	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
802	NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
803	DELAY(100);
804	NFE_WRITE(sc, NFE_MAC_RESET, 0);
805	DELAY(100);
806	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
807	pwr = NFE_READ(sc, NFE_PWR2_CTL);
808	pwr &= ~NFE_PWR2_WAKEUP_MASK;
809	if (sc->nfe_revid >= 0xa3 &&
810	    (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
811	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
812		pwr |= NFE_PWR2_REVA3;
813	NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
814}
815
816
817static void
818nfe_miibus_statchg(device_t dev)
819{
820	struct nfe_softc *sc;
821
822	sc = device_get_softc(dev);
823	taskqueue_enqueue(taskqueue_swi, &sc->nfe_link_task);
824}
825
826
827static void
828nfe_link_task(void *arg, int pending)
829{
830	struct nfe_softc *sc;
831	struct mii_data *mii;
832	struct ifnet *ifp;
833	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
834	uint32_t gmask, rxctl, txctl, val;
835
836	sc = (struct nfe_softc *)arg;
837
838	NFE_LOCK(sc);
839
840	mii = device_get_softc(sc->nfe_miibus);
841	ifp = sc->nfe_ifp;
842	if (mii == NULL || ifp == NULL) {
843		NFE_UNLOCK(sc);
844		return;
845	}
846
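	/*
	 * Reprogram the MAC for the speed/duplex the PHY resolved and
	 * update the Rx/Tx pause frame settings to match.
	 */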
847	if (mii->mii_media_status & IFM_ACTIVE) {
848		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
849			sc->nfe_link = 1;
850	} else
851		sc->nfe_link = 0;
852
853	phy = NFE_READ(sc, NFE_PHY_IFACE);
854	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
855
856	seed = NFE_READ(sc, NFE_RNDSEED);
857	seed &= ~NFE_SEED_MASK;
858
859	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) {
860		phy  |= NFE_PHY_HDX;	/* half-duplex */
861		misc |= NFE_MISC1_HDX;
862	}
863
864	switch (IFM_SUBTYPE(mii->mii_media_active)) {
865	case IFM_1000_T:	/* full-duplex only */
866		link |= NFE_MEDIA_1000T;
867		seed |= NFE_SEED_1000T;
868		phy  |= NFE_PHY_1000T;
869		break;
870	case IFM_100_TX:
871		link |= NFE_MEDIA_100TX;
872		seed |= NFE_SEED_100TX;
873		phy  |= NFE_PHY_100TX;
874		break;
875	case IFM_10_T:
876		link |= NFE_MEDIA_10T;
877		seed |= NFE_SEED_10T;
878		break;
879	}
880
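	/* Undocumented magic for NFE_SETUP_R1; the value depends on link speed. */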
881	if ((phy & 0x10000000) != 0) {
882		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
883			val = NFE_R1_MAGIC_1000;
884		else
885			val = NFE_R1_MAGIC_10_100;
886	} else
887		val = NFE_R1_MAGIC_DEFAULT;
888	NFE_WRITE(sc, NFE_SETUP_R1, val);
889
890	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
891
892	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
893	NFE_WRITE(sc, NFE_MISC1, misc);
894	NFE_WRITE(sc, NFE_LINKSPEED, link);
895
896	gmask = mii->mii_media_active & IFM_GMASK;
897	if ((gmask & IFM_FDX) != 0) {
898		/* All hardware seems to support Rx pause frames. */
899		val = NFE_READ(sc, NFE_RXFILTER);
900		if ((gmask & IFM_FLAG0) != 0)
901			val |= NFE_PFF_RX_PAUSE;
902		else
903			val &= ~NFE_PFF_RX_PAUSE;
904		NFE_WRITE(sc, NFE_RXFILTER, val);
905		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
906			val = NFE_READ(sc, NFE_MISC1);
907			if ((gmask & IFM_FLAG1) != 0) {
908				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
909				    NFE_TX_PAUSE_FRAME_ENABLE);
910				val |= NFE_MISC1_TX_PAUSE;
911			} else {
912				val &= ~NFE_MISC1_TX_PAUSE;
913				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
914				    NFE_TX_PAUSE_FRAME_DISABLE);
915			}
916			NFE_WRITE(sc, NFE_MISC1, val);
917		}
918	} else {
919		/* disable rx/tx pause frames */
920		val = NFE_READ(sc, NFE_RXFILTER);
921		val &= ~NFE_PFF_RX_PAUSE;
922		NFE_WRITE(sc, NFE_RXFILTER, val);
923		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
924			NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
925			    NFE_TX_PAUSE_FRAME_DISABLE);
926			val = NFE_READ(sc, NFE_MISC1);
927			val &= ~NFE_MISC1_TX_PAUSE;
928			NFE_WRITE(sc, NFE_MISC1, val);
929		}
930	}
931
932	txctl = NFE_READ(sc, NFE_TX_CTL);
933	rxctl = NFE_READ(sc, NFE_RX_CTL);
934	if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
935		txctl |= NFE_TX_START;
936		rxctl |= NFE_RX_START;
937	} else {
938		txctl &= ~NFE_TX_START;
939		rxctl &= ~NFE_RX_START;
940	}
941	NFE_WRITE(sc, NFE_TX_CTL, txctl);
942	NFE_WRITE(sc, NFE_RX_CTL, rxctl);
943
944	NFE_UNLOCK(sc);
945}
946
947
948static int
949nfe_miibus_readreg(device_t dev, int phy, int reg)
950{
951	struct nfe_softc *sc = device_get_softc(dev);
952	uint32_t val;
953	int ntries;
954
955	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
956
957	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
958		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
959		DELAY(100);
960	}
961
962	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
963
964	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
965		DELAY(100);
966		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
967			break;
968	}
969	if (ntries == NFE_TIMEOUT) {
970		DPRINTFN(sc, 2, "timeout waiting for PHY\n");
971		return 0;
972	}
973
974	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
975		DPRINTFN(sc, 2, "could not read PHY\n");
976		return 0;
977	}
978
979	val = NFE_READ(sc, NFE_PHY_DATA);
980	if (val != 0xffffffff && val != 0)
981		sc->mii_phyaddr = phy;
982
983	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
984
985	return (val);
986}
987
988
989static int
990nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
991{
992	struct nfe_softc *sc = device_get_softc(dev);
993	uint32_t ctl;
994	int ntries;
995
996	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
997
998	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
999		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1000		DELAY(100);
1001	}
1002
1003	NFE_WRITE(sc, NFE_PHY_DATA, val);
1004	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
1005	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
1006
1007	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1008		DELAY(100);
1009		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1010			break;
1011	}
1012#ifdef NFE_DEBUG
1013	if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
1014		device_printf(sc->nfe_dev, "could not write to PHY\n");
1015#endif
1016	return (0);
1017}
1018
1019struct nfe_dmamap_arg {
1020	bus_addr_t nfe_busaddr;
1021};
1022
1023static int
1024nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1025{
1026	struct nfe_dmamap_arg ctx;
1027	struct nfe_rx_data *data;
1028	void *desc;
1029	int i, error, descsize;
1030
1031	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1032		desc = ring->desc64;
1033		descsize = sizeof (struct nfe_desc64);
1034	} else {
1035		desc = ring->desc32;
1036		descsize = sizeof (struct nfe_desc32);
1037	}
1038
1039	ring->cur = ring->next = 0;
1040
1041	error = bus_dma_tag_create(sc->nfe_parent_tag,
1042	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1043	    BUS_SPACE_MAXADDR,			/* lowaddr */
1044	    BUS_SPACE_MAXADDR,			/* highaddr */
1045	    NULL, NULL,				/* filter, filterarg */
1046	    NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
1047	    NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
1048	    0,					/* flags */
1049	    NULL, NULL,				/* lockfunc, lockarg */
1050	    &ring->rx_desc_tag);
1051	if (error != 0) {
1052		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1053		goto fail;
1054	}
1055
1056	/* Allocate DMA'able memory for the descriptor ring. */
1057	error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
1058	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
1059	if (error != 0) {
1060		device_printf(sc->nfe_dev, "could not allocate desc DMA memory\n");
1061		goto fail;
1062	}
1063	if (sc->nfe_flags & NFE_40BIT_ADDR)
1064		ring->desc64 = desc;
1065	else
1066		ring->desc32 = desc;
1067
1068	/* Load the descriptor ring into device-visible (bus) address space. */
1069	ctx.nfe_busaddr = 0;
1070	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
1071	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1072	if (error != 0) {
1073		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1074		goto fail;
1075	}
1076	ring->physaddr = ctx.nfe_busaddr;
1077
1078	error = bus_dma_tag_create(sc->nfe_parent_tag,
1079	    1, 0,			/* alignment, boundary */
1080	    BUS_SPACE_MAXADDR,		/* lowaddr */
1081	    BUS_SPACE_MAXADDR,		/* highaddr */
1082	    NULL, NULL,			/* filter, filterarg */
1083	    MCLBYTES, 1,		/* maxsize, nsegments */
1084	    MCLBYTES,			/* maxsegsize */
1085	    0,				/* flags */
1086	    NULL, NULL,			/* lockfunc, lockarg */
1087	    &ring->rx_data_tag);
1088	if (error != 0) {
1089		device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
1090		goto fail;
1091	}
1092
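	/*
	 * The spare map is swapped with a buffer's map in nfe_newbuf() so the
	 * old mbuf stays usable if a replacement buffer cannot be loaded.
	 */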
1093	error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
1094	if (error != 0) {
1095		device_printf(sc->nfe_dev,
1096		    "could not create Rx DMA spare map\n");
1097		goto fail;
1098	}
1099
1100	/*
1101	 * Pre-allocate Rx buffers and populate Rx ring.
1102	 */
1103	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1104		data = &sc->rxq.data[i];
1105		data->rx_data_map = NULL;
1106		data->m = NULL;
1107		error = bus_dmamap_create(ring->rx_data_tag, 0,
1108		    &data->rx_data_map);
1109		if (error != 0) {
1110			device_printf(sc->nfe_dev,
1111			    "could not create Rx DMA map\n");
1112			goto fail;
1113		}
1114	}
1115
1116fail:
1117	return (error);
1118}
1119
1120
1121static void
1122nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1123{
1124	struct nfe_dmamap_arg ctx;
1125	struct nfe_rx_data *data;
1126	void *desc;
1127	int i, error, descsize;
1128
1129	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1130		return;
1131	if (jumbo_disable != 0) {
1132		device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
1133		sc->nfe_jumbo_disable = 1;
1134		return;
1135	}
1136
1137	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1138		desc = ring->jdesc64;
1139		descsize = sizeof (struct nfe_desc64);
1140	} else {
1141		desc = ring->jdesc32;
1142		descsize = sizeof (struct nfe_desc32);
1143	}
1144
1145	ring->jcur = ring->jnext = 0;
1146
1147	/* Create DMA tag for jumbo Rx ring. */
1148	error = bus_dma_tag_create(sc->nfe_parent_tag,
1149	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1150	    BUS_SPACE_MAXADDR,			/* lowaddr */
1151	    BUS_SPACE_MAXADDR,			/* highaddr */
1152	    NULL, NULL,				/* filter, filterarg */
1153	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsize */
1154	    1, 					/* nsegments */
1155	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsegsize */
1156	    0,					/* flags */
1157	    NULL, NULL,				/* lockfunc, lockarg */
1158	    &ring->jrx_desc_tag);
1159	if (error != 0) {
1160		device_printf(sc->nfe_dev,
1161		    "could not create jumbo ring DMA tag\n");
1162		goto fail;
1163	}
1164
1165	/* Create DMA tag for jumbo Rx buffers. */
1166	error = bus_dma_tag_create(sc->nfe_parent_tag,
1167	    PAGE_SIZE, 0,			/* alignment, boundary */
1168	    BUS_SPACE_MAXADDR,			/* lowaddr */
1169	    BUS_SPACE_MAXADDR,			/* highaddr */
1170	    NULL, NULL,				/* filter, filterarg */
1171	    MJUM9BYTES,				/* maxsize */
1172	    1,					/* nsegments */
1173	    MJUM9BYTES,				/* maxsegsize */
1174	    0,					/* flags */
1175	    NULL, NULL,				/* lockfunc, lockarg */
1176	    &ring->jrx_data_tag);
1177	if (error != 0) {
1178		device_printf(sc->nfe_dev,
1179		    "could not create jumbo Rx buffer DMA tag\n");
1180		goto fail;
1181	}
1182
1183	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
1184	error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
1185	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
1186	if (error != 0) {
1187		device_printf(sc->nfe_dev,
1188		    "could not allocate DMA'able memory for jumbo Rx ring\n");
1189		goto fail;
1190	}
1191	if (sc->nfe_flags & NFE_40BIT_ADDR)
1192		ring->jdesc64 = desc;
1193	else
1194		ring->jdesc32 = desc;
1195
1196	ctx.nfe_busaddr = 0;
1197	error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
1198	    NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1199	if (error != 0) {
1200		device_printf(sc->nfe_dev,
1201		    "could not load DMA'able memory for jumbo Rx ring\n");
1202		goto fail;
1203	}
1204	ring->jphysaddr = ctx.nfe_busaddr;
1205
1206	/* Create DMA maps for jumbo Rx buffers. */
1207	error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
1208	if (error != 0) {
1209		device_printf(sc->nfe_dev,
1210		    "could not create jumbo Rx DMA spare map\n");
1211		goto fail;
1212	}
1213
1214	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1215		data = &sc->jrxq.jdata[i];
1216		data->rx_data_map = NULL;
1217		data->m = NULL;
1218		error = bus_dmamap_create(ring->jrx_data_tag, 0,
1219		    &data->rx_data_map);
1220		if (error != 0) {
1221			device_printf(sc->nfe_dev,
1222			    "could not create jumbo Rx DMA map\n");
1223			goto fail;
1224		}
1225	}
1226
1227	return;
1228
1229fail:
1230	/*
1231	 * Running without jumbo frame support is OK in most cases, so don't
1232	 * fail the attach if the jumbo DMA tag/map cannot be created.
1233	 */
1234	nfe_free_jrx_ring(sc, ring);
1235	device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
1236	    "resource shortage\n");
1237	sc->nfe_jumbo_disable = 1;
1238}
1239
1240
1241static int
1242nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1243{
1244	void *desc;
1245	size_t descsize;
1246	int i;
1247
1248	ring->cur = ring->next = 0;
1249	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1250		desc = ring->desc64;
1251		descsize = sizeof (struct nfe_desc64);
1252	} else {
1253		desc = ring->desc32;
1254		descsize = sizeof (struct nfe_desc32);
1255	}
1256	bzero(desc, descsize * NFE_RX_RING_COUNT);
1257	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1258		if (nfe_newbuf(sc, i) != 0)
1259			return (ENOBUFS);
1260	}
1261
1262	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
1263	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1264
1265	return (0);
1266}
1267
1268
1269static int
1270nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1271{
1272	void *desc;
1273	size_t descsize;
1274	int i;
1275
1276	ring->jcur = ring->jnext = 0;
1277	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1278		desc = ring->jdesc64;
1279		descsize = sizeof (struct nfe_desc64);
1280	} else {
1281		desc = ring->jdesc32;
1282		descsize = sizeof (struct nfe_desc32);
1283	}
1284	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
1285	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1286		if (nfe_jnewbuf(sc, i) != 0)
1287			return (ENOBUFS);
1288	}
1289
1290	bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
1291	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1292
1293	return (0);
1294}
1295
1296
1297static void
1298nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1299{
1300	struct nfe_rx_data *data;
1301	void *desc;
1302	int i, descsize;
1303
1304	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1305		desc = ring->desc64;
1306		descsize = sizeof (struct nfe_desc64);
1307	} else {
1308		desc = ring->desc32;
1309		descsize = sizeof (struct nfe_desc32);
1310	}
1311
1312	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1313		data = &ring->data[i];
1314		if (data->rx_data_map != NULL) {
1315			bus_dmamap_destroy(ring->rx_data_tag,
1316			    data->rx_data_map);
1317			data->rx_data_map = NULL;
1318		}
1319		if (data->m != NULL) {
1320			m_freem(data->m);
1321			data->m = NULL;
1322		}
1323	}
1324	if (ring->rx_data_tag != NULL) {
1325		if (ring->rx_spare_map != NULL) {
1326			bus_dmamap_destroy(ring->rx_data_tag,
1327			    ring->rx_spare_map);
1328			ring->rx_spare_map = NULL;
1329		}
1330		bus_dma_tag_destroy(ring->rx_data_tag);
1331		ring->rx_data_tag = NULL;
1332	}
1333
1334	if (desc != NULL) {
1335		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
1336		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
1337		ring->desc64 = NULL;
1338		ring->desc32 = NULL;
1339		ring->rx_desc_map = NULL;
1340	}
1341	if (ring->rx_desc_tag != NULL) {
1342		bus_dma_tag_destroy(ring->rx_desc_tag);
1343		ring->rx_desc_tag = NULL;
1344	}
1345}
1346
1347
1348static void
1349nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1350{
1351	struct nfe_rx_data *data;
1352	void *desc;
1353	int i, descsize;
1354
1355	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1356		return;
1357
1358	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1359		desc = ring->jdesc64;
1360		descsize = sizeof (struct nfe_desc64);
1361	} else {
1362		desc = ring->jdesc32;
1363		descsize = sizeof (struct nfe_desc32);
1364	}
1365
1366	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1367		data = &ring->jdata[i];
1368		if (data->rx_data_map != NULL) {
1369			bus_dmamap_destroy(ring->jrx_data_tag,
1370			    data->rx_data_map);
1371			data->rx_data_map = NULL;
1372		}
1373		if (data->m != NULL) {
1374			m_freem(data->m);
1375			data->m = NULL;
1376		}
1377	}
1378	if (ring->jrx_data_tag != NULL) {
1379		if (ring->jrx_spare_map != NULL) {
1380			bus_dmamap_destroy(ring->jrx_data_tag,
1381			    ring->jrx_spare_map);
1382			ring->jrx_spare_map = NULL;
1383		}
1384		bus_dma_tag_destroy(ring->jrx_data_tag);
1385		ring->jrx_data_tag = NULL;
1386	}
1387
1388	if (desc != NULL) {
1389		bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
1390		bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
1391		ring->jdesc64 = NULL;
1392		ring->jdesc32 = NULL;
1393		ring->jrx_desc_map = NULL;
1394	}
1395
1396	if (ring->jrx_desc_tag != NULL) {
1397		bus_dma_tag_destroy(ring->jrx_desc_tag);
1398		ring->jrx_desc_tag = NULL;
1399	}
1400}
1401
1402
1403static int
1404nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1405{
1406	struct nfe_dmamap_arg ctx;
1407	int i, error;
1408	void *desc;
1409	int descsize;
1410
1411	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1412		desc = ring->desc64;
1413		descsize = sizeof (struct nfe_desc64);
1414	} else {
1415		desc = ring->desc32;
1416		descsize = sizeof (struct nfe_desc32);
1417	}
1418
1419	ring->queued = 0;
1420	ring->cur = ring->next = 0;
1421
1422	error = bus_dma_tag_create(sc->nfe_parent_tag,
1423	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1424	    BUS_SPACE_MAXADDR,			/* lowaddr */
1425	    BUS_SPACE_MAXADDR,			/* highaddr */
1426	    NULL, NULL,				/* filter, filterarg */
1427	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
1428	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
1429	    0,					/* flags */
1430	    NULL, NULL,				/* lockfunc, lockarg */
1431	    &ring->tx_desc_tag);
1432	if (error != 0) {
1433		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1434		goto fail;
1435	}
1436
1437	error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
1438	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
1439	if (error != 0) {
1440		device_printf(sc->nfe_dev, "could not allocate desc DMA memory\n");
1441		goto fail;
1442	}
1443	if (sc->nfe_flags & NFE_40BIT_ADDR)
1444		ring->desc64 = desc;
1445	else
1446		ring->desc32 = desc;
1447
1448	ctx.nfe_busaddr = 0;
1449	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
1450	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1451	if (error != 0) {
1452		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1453		goto fail;
1454	}
1455	ring->physaddr = ctx.nfe_busaddr;
1456
1457	error = bus_dma_tag_create(sc->nfe_parent_tag,
1458	    1, 0,
1459	    BUS_SPACE_MAXADDR,
1460	    BUS_SPACE_MAXADDR,
1461	    NULL, NULL,
1462	    NFE_TSO_MAXSIZE,
1463	    NFE_MAX_SCATTER,
1464	    NFE_TSO_MAXSGSIZE,
1465	    0,
1466	    NULL, NULL,
1467	    &ring->tx_data_tag);
1468	if (error != 0) {
1469		device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
1470		goto fail;
1471	}
1472
1473	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1474		error = bus_dmamap_create(ring->tx_data_tag, 0,
1475		    &ring->data[i].tx_data_map);
1476		if (error != 0) {
1477			device_printf(sc->nfe_dev,
1478			    "could not create Tx DMA map\n");
1479			goto fail;
1480		}
1481	}
1482
1483fail:
1484	return (error);
1485}
1486
1487
1488static void
1489nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1490{
1491	void *desc;
1492	size_t descsize;
1493
1494	sc->nfe_force_tx = 0;
1495	ring->queued = 0;
1496	ring->cur = ring->next = 0;
1497	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1498		desc = ring->desc64;
1499		descsize = sizeof (struct nfe_desc64);
1500	} else {
1501		desc = ring->desc32;
1502		descsize = sizeof (struct nfe_desc32);
1503	}
1504	bzero(desc, descsize * NFE_TX_RING_COUNT);
1505
1506	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1507	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1508}
1509
1510
1511static void
1512nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1513{
1514	struct nfe_tx_data *data;
1515	void *desc;
1516	int i, descsize;
1517
1518	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1519		desc = ring->desc64;
1520		descsize = sizeof (struct nfe_desc64);
1521	} else {
1522		desc = ring->desc32;
1523		descsize = sizeof (struct nfe_desc32);
1524	}
1525
1526	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1527		data = &ring->data[i];
1528
1529		if (data->m != NULL) {
1530			bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
1531			    BUS_DMASYNC_POSTWRITE);
1532			bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
1533			m_freem(data->m);
1534			data->m = NULL;
1535		}
1536		if (data->tx_data_map != NULL) {
1537			bus_dmamap_destroy(ring->tx_data_tag,
1538			    data->tx_data_map);
1539			data->tx_data_map = NULL;
1540		}
1541	}
1542
1543	if (ring->tx_data_tag != NULL) {
1544		bus_dma_tag_destroy(ring->tx_data_tag);
1545		ring->tx_data_tag = NULL;
1546	}
1547
1548	if (desc != NULL) {
1549		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1550		    BUS_DMASYNC_POSTWRITE);
1551		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1552		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1553		ring->desc64 = NULL;
1554		ring->desc32 = NULL;
1555		ring->tx_desc_map = NULL;
1556		bus_dma_tag_destroy(ring->tx_desc_tag);
1557		ring->tx_desc_tag = NULL;
1558	}
1559}
1560
1561#ifdef DEVICE_POLLING
1562static poll_handler_t nfe_poll;
1563
1564
1565static void
1566nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1567{
1568	struct nfe_softc *sc = ifp->if_softc;
1569	uint32_t r;
1570
1571	NFE_LOCK(sc);
1572
1573	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1574		NFE_UNLOCK(sc);
1575		return;
1576	}
1577
1578	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1579		nfe_jrxeof(sc, count);
1580	else
1581		nfe_rxeof(sc, count);
1582	nfe_txeof(sc);
1583	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1584		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1585
1586	if (cmd == POLL_AND_CHECK_STATUS) {
1587		if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1588			NFE_UNLOCK(sc);
1589			return;
1590		}
1591		NFE_WRITE(sc, sc->nfe_irq_status, r);
1592
1593		if (r & NFE_IRQ_LINK) {
1594			NFE_READ(sc, NFE_PHY_STATUS);
1595			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1596			DPRINTF(sc, "link state changed\n");
1597		}
1598	}
1599	NFE_UNLOCK(sc);
1600}
1601#endif /* DEVICE_POLLING */
1602
1603static void
1604nfe_set_intr(struct nfe_softc *sc)
1605{
1606
1607	if (sc->nfe_msi != 0)
1608		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1609}
1610
1611
1612/* With MSI-X, a write to the interrupt mask register behaves as an XOR. */
1613static __inline void
1614nfe_enable_intr(struct nfe_softc *sc)
1615{
1616
1617	if (sc->nfe_msix != 0) {
1618		/* XXX Should have a better way to enable interrupts! */
1619		if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
1620			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1621	} else
1622		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1623}
1624
1625
1626static __inline void
1627nfe_disable_intr(struct nfe_softc *sc)
1628{
1629
1630	if (sc->nfe_msix != 0) {
1631		/* XXX Should have a better way to disable interrupts! */
1632		if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
1633			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1634	} else
1635		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1636}
1637
1638
1639static int
1640nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1641{
1642	struct nfe_softc *sc;
1643	struct ifreq *ifr;
1644	struct mii_data *mii;
1645	int error, init, mask;
1646
1647	sc = ifp->if_softc;
1648	ifr = (struct ifreq *) data;
1649	error = 0;
1650	init = 0;
1651	switch (cmd) {
1652	case SIOCSIFMTU:
1653		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
1654			error = EINVAL;
1655		else if (ifp->if_mtu != ifr->ifr_mtu) {
1656			if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
1657			    (sc->nfe_jumbo_disable != 0)) &&
1658			    ifr->ifr_mtu > ETHERMTU)
1659				error = EINVAL;
1660			else {
1661				NFE_LOCK(sc);
1662				ifp->if_mtu = ifr->ifr_mtu;
1663				if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1664					nfe_init_locked(sc);
1665				NFE_UNLOCK(sc);
1666			}
1667		}
1668		break;
1669	case SIOCSIFFLAGS:
1670		NFE_LOCK(sc);
1671		if (ifp->if_flags & IFF_UP) {
1672			/*
1673			 * If only the PROMISC or ALLMULTI flag changes, then
1674			 * don't do a full re-init of the chip, just update
1675			 * the Rx filter.
1676			 */
1677			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1678			    ((ifp->if_flags ^ sc->nfe_if_flags) &
1679			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1680				nfe_setmulti(sc);
1681			else
1682				nfe_init_locked(sc);
1683		} else {
1684			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1685				nfe_stop(ifp);
1686		}
1687		sc->nfe_if_flags = ifp->if_flags;
1688		NFE_UNLOCK(sc);
1689		error = 0;
1690		break;
1691	case SIOCADDMULTI:
1692	case SIOCDELMULTI:
1693		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1694			NFE_LOCK(sc);
1695			nfe_setmulti(sc);
1696			NFE_UNLOCK(sc);
1697			error = 0;
1698		}
1699		break;
1700	case SIOCSIFMEDIA:
1701	case SIOCGIFMEDIA:
1702		mii = device_get_softc(sc->nfe_miibus);
1703		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1704		break;
1705	case SIOCSIFCAP:
1706		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1707#ifdef DEVICE_POLLING
1708		if ((mask & IFCAP_POLLING) != 0) {
1709			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1710				error = ether_poll_register(nfe_poll, ifp);
1711				if (error)
1712					break;
1713				NFE_LOCK(sc);
1714				nfe_disable_intr(sc);
1715				ifp->if_capenable |= IFCAP_POLLING;
1716				NFE_UNLOCK(sc);
1717			} else {
1718				error = ether_poll_deregister(ifp);
1719				/* Enable interrupt even in error case */
1720				NFE_LOCK(sc);
1721				nfe_enable_intr(sc);
1722				ifp->if_capenable &= ~IFCAP_POLLING;
1723				NFE_UNLOCK(sc);
1724			}
1725		}
1726#endif /* DEVICE_POLLING */
1727		if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1728		    (mask & IFCAP_HWCSUM) != 0) {
1729			ifp->if_capenable ^= IFCAP_HWCSUM;
1730			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
1731			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
1732				ifp->if_hwassist |= NFE_CSUM_FEATURES;
1733			else
1734				ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
1735			init++;
1736		}
1737		if ((sc->nfe_flags & NFE_HW_VLAN) != 0 &&
1738		    (mask & IFCAP_VLAN_HWTAGGING) != 0) {
1739			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1740			init++;
1741		}
1742		/*
1743		 * XXX
1744		 * VLAN tag stripping appears to require Rx checksum offload.
1745		 * Unfortunately FreeBSD has no way to disable only Rx-side
1746		 * VLAN stripping, so when Rx checksum offload is known to be
1747		 * disabled, turn the entire hardware VLAN assist off.
1748		 */
1749		if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) ==
1750		    (NFE_HW_CSUM | NFE_HW_VLAN)) {
1751			if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
1752				ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING;
1753		}
1754
1755		if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1756		    (mask & IFCAP_TSO4) != 0) {
1757			ifp->if_capenable ^= IFCAP_TSO4;
1758			if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
1759			    (IFCAP_TSO4 & ifp->if_capabilities) != 0)
1760				ifp->if_hwassist |= CSUM_TSO;
1761			else
1762				ifp->if_hwassist &= ~CSUM_TSO;
1763		}
1764
1765		if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1766			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1767			nfe_init(sc);
1768		}
1769		if ((sc->nfe_flags & NFE_HW_VLAN) != 0)
1770			VLAN_CAPABILITIES(ifp);
1771		break;
1772	default:
1773		error = ether_ioctl(ifp, cmd, data);
1774		break;
1775	}
1776
1777	return (error);
1778}
1779
1780
1781static int
1782nfe_intr(void *arg)
1783{
1784	struct nfe_softc *sc;
1785	uint32_t status;
1786
1787	sc = (struct nfe_softc *)arg;
1788
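	/* Filter handler: mask interrupts and defer processing to the taskqueue. */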
1789	status = NFE_READ(sc, sc->nfe_irq_status);
1790	if (status == 0 || status == 0xffffffff)
1791		return (FILTER_STRAY);
1792	nfe_disable_intr(sc);
1793	taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1794
1795	return (FILTER_HANDLED);
1796}
1797
1798
1799static void
1800nfe_int_task(void *arg, int pending)
1801{
1802	struct nfe_softc *sc = arg;
1803	struct ifnet *ifp = sc->nfe_ifp;
1804	uint32_t r;
1805	int domore;
1806
1807	NFE_LOCK(sc);
1808
1809	if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1810		nfe_enable_intr(sc);
1811		NFE_UNLOCK(sc);
1812		return;	/* not for us */
1813	}
1814	NFE_WRITE(sc, sc->nfe_irq_status, r);
1815
1816	DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
1817
1818#ifdef DEVICE_POLLING
1819	if (ifp->if_capenable & IFCAP_POLLING) {
1820		NFE_UNLOCK(sc);
1821		return;
1822	}
1823#endif
1824
1825	if (r & NFE_IRQ_LINK) {
1826		NFE_READ(sc, NFE_PHY_STATUS);
1827		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1828		DPRINTF(sc, "link state changed\n");
1829	}
1830
1831	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1832		NFE_UNLOCK(sc);
1833		nfe_enable_intr(sc);
1834		return;
1835	}
1836
1837	domore = 0;
1838	/* check Rx ring */
1839	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1840		domore = nfe_jrxeof(sc, sc->nfe_process_limit);
1841	else
1842		domore = nfe_rxeof(sc, sc->nfe_process_limit);
1843	/* check Tx ring */
1844	nfe_txeof(sc);
1845
1846	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1847		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1848
1849	NFE_UNLOCK(sc);
1850
1851	if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1852		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1853		return;
1854	}
1855
1856	/* Reenable interrupts. */
1857	nfe_enable_intr(sc);
1858}
1859
1860
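/*
 * Hand an Rx descriptor back to the chip without replacing its mbuf.
 * Used when a frame is dropped or a new buffer cannot be allocated,
 * so the existing cluster is simply reused.
 */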
1861static __inline void
1862nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1863{
1864	struct nfe_desc32 *desc32;
1865	struct nfe_desc64 *desc64;
1866	struct nfe_rx_data *data;
1867	struct mbuf *m;
1868
1869	data = &sc->rxq.data[idx];
1870	m = data->m;
1871
1872	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1873		desc64 = &sc->rxq.desc64[idx];
1874		/* A VLAN tag may have overwritten physaddr; restore the buffer address. */
1875		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1876		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1877		desc64->length = htole16(m->m_len);
1878		desc64->flags = htole16(NFE_RX_READY);
1879	} else {
1880		desc32 = &sc->rxq.desc32[idx];
1881		desc32->length = htole16(m->m_len);
1882		desc32->flags = htole16(NFE_RX_READY);
1883	}
1884}
1885
1886
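/* Jumbo Rx ring counterpart of nfe_discard_rxbuf(). */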
1887static __inline void
1888nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1889{
1890	struct nfe_desc32 *desc32;
1891	struct nfe_desc64 *desc64;
1892	struct nfe_rx_data *data;
1893	struct mbuf *m;
1894
1895	data = &sc->jrxq.jdata[idx];
1896	m = data->m;
1897
1898	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1899		desc64 = &sc->jrxq.jdesc64[idx];
1900		/* A VLAN tag may have overwritten physaddr; restore the buffer address. */
1901		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1902		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1903		desc64->length = htole16(m->m_len);
1904		desc64->flags = htole16(NFE_RX_READY);
1905	} else {
1906		desc32 = &sc->jrxq.jdesc32[idx];
1907		desc32->length = htole16(m->m_len);
1908		desc32->flags = htole16(NFE_RX_READY);
1909	}
1910}
1911
1912
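/*
 * Attach a fresh mbuf cluster to an Rx descriptor.  The new buffer is
 * loaded into the spare DMA map first so the current buffer stays
 * valid if allocation or mapping fails; on success the spare map and
 * the descriptor's map are swapped and the descriptor is re-armed.
 */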
1913static int
1914nfe_newbuf(struct nfe_softc *sc, int idx)
1915{
1916	struct nfe_rx_data *data;
1917	struct nfe_desc32 *desc32;
1918	struct nfe_desc64 *desc64;
1919	struct mbuf *m;
1920	bus_dma_segment_t segs[1];
1921	bus_dmamap_t map;
1922	int nsegs;
1923
1924	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1925	if (m == NULL)
1926		return (ENOBUFS);
1927
1928	m->m_len = m->m_pkthdr.len = MCLBYTES;
1929	m_adj(m, ETHER_ALIGN);
1930
1931	if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
1932	    m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1933		m_freem(m);
1934		return (ENOBUFS);
1935	}
1936	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1937
1938	data = &sc->rxq.data[idx];
1939	if (data->m != NULL) {
1940		bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1941		    BUS_DMASYNC_POSTREAD);
1942		bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
1943	}
1944	map = data->rx_data_map;
1945	data->rx_data_map = sc->rxq.rx_spare_map;
1946	sc->rxq.rx_spare_map = map;
1947	bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1948	    BUS_DMASYNC_PREREAD);
1949	data->paddr = segs[0].ds_addr;
1950	data->m = m;
1951	/* update mapping address in h/w descriptor */
1952	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1953		desc64 = &sc->rxq.desc64[idx];
1954		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
1955		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
1956		desc64->length = htole16(segs[0].ds_len);
1957		desc64->flags = htole16(NFE_RX_READY);
1958	} else {
1959		desc32 = &sc->rxq.desc32[idx];
1960		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
1961		desc32->length = htole16(segs[0].ds_len);
1962		desc32->flags = htole16(NFE_RX_READY);
1963	}
1964
1965	return (0);
1966}
1967
1968
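/* Same as nfe_newbuf() but using 9k clusters for the jumbo Rx ring. */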
1969static int
1970nfe_jnewbuf(struct nfe_softc *sc, int idx)
1971{
1972	struct nfe_rx_data *data;
1973	struct nfe_desc32 *desc32;
1974	struct nfe_desc64 *desc64;
1975	struct mbuf *m;
1976	bus_dma_segment_t segs[1];
1977	bus_dmamap_t map;
1978	int nsegs;
1979
1980	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1981	if (m == NULL)
1982		return (ENOBUFS);
1983	if ((m->m_flags & M_EXT) == 0) {
1984		m_freem(m);
1985		return (ENOBUFS);
1986	}
1987	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
1988	m_adj(m, ETHER_ALIGN);
1989
1990	if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
1991	    sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1992		m_freem(m);
1993		return (ENOBUFS);
1994	}
1995	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1996
1997	data = &sc->jrxq.jdata[idx];
1998	if (data->m != NULL) {
1999		bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2000		    BUS_DMASYNC_POSTREAD);
2001		bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
2002	}
2003	map = data->rx_data_map;
2004	data->rx_data_map = sc->jrxq.jrx_spare_map;
2005	sc->jrxq.jrx_spare_map = map;
2006	bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2007	    BUS_DMASYNC_PREREAD);
2008	data->paddr = segs[0].ds_addr;
2009	data->m = m;
2010	/* update mapping address in h/w descriptor */
2011	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2012		desc64 = &sc->jrxq.jdesc64[idx];
2013		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2014		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2015		desc64->length = htole16(segs[0].ds_len);
2016		desc64->flags = htole16(NFE_RX_READY);
2017	} else {
2018		desc32 = &sc->jrxq.jdesc32[idx];
2019		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2020		desc32->length = htole16(segs[0].ds_len);
2021		desc32->flags = htole16(NFE_RX_READY);
2022	}
2023
2024	return (0);
2025}
2026
2027
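/*
 * Rx ring processing.  Walk the ring until hitting a descriptor still
 * owned by the chip (NFE_RX_READY set) or until the process limit is
 * reached, validate each frame according to the V1/V2 descriptor
 * format, replace its buffer and pass it up with the lock dropped.
 * Returns EAGAIN if the limit was exhausted, 0 otherwise.
 */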
2028static int
2029nfe_rxeof(struct nfe_softc *sc, int count)
2030{
2031	struct ifnet *ifp = sc->nfe_ifp;
2032	struct nfe_desc32 *desc32;
2033	struct nfe_desc64 *desc64;
2034	struct nfe_rx_data *data;
2035	struct mbuf *m;
2036	uint16_t flags;
2037	int len, prog;
2038	uint32_t vtag = 0;
2039
2040	NFE_LOCK_ASSERT(sc);
2041
2042	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2043	    BUS_DMASYNC_POSTREAD);
2044
2045	for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2046		if (count <= 0)
2047			break;
2048		count--;
2049
2050		data = &sc->rxq.data[sc->rxq.cur];
2051
2052		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2053			desc64 = &sc->rxq.desc64[sc->rxq.cur];
2054			vtag = le32toh(desc64->physaddr[1]);
2055			flags = le16toh(desc64->flags);
2056			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2057		} else {
2058			desc32 = &sc->rxq.desc32[sc->rxq.cur];
2059			flags = le16toh(desc32->flags);
2060			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2061		}
2062
2063		if (flags & NFE_RX_READY)
2064			break;
2065		prog++;
2066		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2067			if (!(flags & NFE_RX_VALID_V1)) {
2068				ifp->if_ierrors++;
2069				nfe_discard_rxbuf(sc, sc->rxq.cur);
2070				continue;
2071			}
2072			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2073				flags &= ~NFE_RX_ERROR;
2074				len--;	/* fix buffer length */
2075			}
2076		} else {
2077			if (!(flags & NFE_RX_VALID_V2)) {
2078				ifp->if_ierrors++;
2079				nfe_discard_rxbuf(sc, sc->rxq.cur);
2080				continue;
2081			}
2082
2083			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2084				flags &= ~NFE_RX_ERROR;
2085				len--;	/* fix buffer length */
2086			}
2087		}
2088
2089		if (flags & NFE_RX_ERROR) {
2090			ifp->if_ierrors++;
2091			nfe_discard_rxbuf(sc, sc->rxq.cur);
2092			continue;
2093		}
2094
2095		m = data->m;
2096		if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2097			ifp->if_iqdrops++;
2098			nfe_discard_rxbuf(sc, sc->rxq.cur);
2099			continue;
2100		}
2101
2102		if ((vtag & NFE_RX_VTAG) != 0 &&
2103		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2104			m->m_pkthdr.ether_vtag = vtag & 0xffff;
2105			m->m_flags |= M_VLANTAG;
2106		}
2107
2108		m->m_pkthdr.len = m->m_len = len;
2109		m->m_pkthdr.rcvif = ifp;
2110
2111		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2112			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2113				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2114				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2115				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2116				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
2117					m->m_pkthdr.csum_flags |=
2118					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2119					m->m_pkthdr.csum_data = 0xffff;
2120				}
2121			}
2122		}
2123
2124		ifp->if_ipackets++;
2125
2126		NFE_UNLOCK(sc);
2127		(*ifp->if_input)(ifp, m);
2128		NFE_LOCK(sc);
2129	}
2130
2131	if (prog > 0)
2132		bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2133		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2134
2135	return (count > 0 ? 0 : EAGAIN);
2136}
2137
2138
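/* Jumbo Rx ring counterpart of nfe_rxeof(). */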
2139static int
2140nfe_jrxeof(struct nfe_softc *sc, int count)
2141{
2142	struct ifnet *ifp = sc->nfe_ifp;
2143	struct nfe_desc32 *desc32;
2144	struct nfe_desc64 *desc64;
2145	struct nfe_rx_data *data;
2146	struct mbuf *m;
2147	uint16_t flags;
2148	int len, prog;
2149	uint32_t vtag = 0;
2150
2151	NFE_LOCK_ASSERT(sc);
2152
2153	bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2154	    BUS_DMASYNC_POSTREAD);
2155
2156	for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2157	    vtag = 0) {
2158		if (count <= 0)
2159			break;
2160		count--;
2161
2162		data = &sc->jrxq.jdata[sc->jrxq.jcur];
2163
2164		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2165			desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2166			vtag = le32toh(desc64->physaddr[1]);
2167			flags = le16toh(desc64->flags);
2168			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2169		} else {
2170			desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2171			flags = le16toh(desc32->flags);
2172			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2173		}
2174
2175		if (flags & NFE_RX_READY)
2176			break;
2177		prog++;
2178		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2179			if (!(flags & NFE_RX_VALID_V1)) {
2180				ifp->if_ierrors++;
2181				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2182				continue;
2183			}
2184			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2185				flags &= ~NFE_RX_ERROR;
2186				len--;	/* fix buffer length */
2187			}
2188		} else {
2189			if (!(flags & NFE_RX_VALID_V2)) {
2190				ifp->if_ierrors++;
2191				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2192				continue;
2193			}
2194
2195			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2196				flags &= ~NFE_RX_ERROR;
2197				len--;	/* fix buffer length */
2198			}
2199		}
2200
2201		if (flags & NFE_RX_ERROR) {
2202			ifp->if_ierrors++;
2203			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2204			continue;
2205		}
2206
2207		m = data->m;
2208		if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2209			ifp->if_iqdrops++;
2210			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2211			continue;
2212		}
2213
2214		if ((vtag & NFE_RX_VTAG) != 0 &&
2215		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2216			m->m_pkthdr.ether_vtag = vtag & 0xffff;
2217			m->m_flags |= M_VLANTAG;
2218		}
2219
2220		m->m_pkthdr.len = m->m_len = len;
2221		m->m_pkthdr.rcvif = ifp;
2222
2223		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2224			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2225				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2226				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2227				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2228				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
2229					m->m_pkthdr.csum_flags |=
2230					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2231					m->m_pkthdr.csum_data = 0xffff;
2232				}
2233			}
2234		}
2235
2236		ifp->if_ipackets++;
2237
2238		NFE_UNLOCK(sc);
2239		(*ifp->if_input)(ifp, m);
2240		NFE_LOCK(sc);
2241	}
2242
2243	if (prog > 0)
2244		bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2245		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2246
2247	return (count > 0 ? 0 : EAGAIN);
2248}
2249
2250
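/*
 * Tx completion processing.  Reclaim descriptors from txq.next up to
 * the first one still owned by the chip, count errors reported in the
 * last fragment of each frame, unload and free the transmitted mbufs,
 * and clear the watchdog timer once the ring has drained.
 */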
2251static void
2252nfe_txeof(struct nfe_softc *sc)
2253{
2254	struct ifnet *ifp = sc->nfe_ifp;
2255	struct nfe_desc32 *desc32;
2256	struct nfe_desc64 *desc64;
2257	struct nfe_tx_data *data = NULL;
2258	uint16_t flags;
2259	int cons, prog;
2260
2261	NFE_LOCK_ASSERT(sc);
2262
2263	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2264	    BUS_DMASYNC_POSTREAD);
2265
2266	prog = 0;
2267	for (cons = sc->txq.next; cons != sc->txq.cur;
2268	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
2269		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2270			desc64 = &sc->txq.desc64[cons];
2271			flags = le16toh(desc64->flags);
2272		} else {
2273			desc32 = &sc->txq.desc32[cons];
2274			flags = le16toh(desc32->flags);
2275		}
2276
2277		if (flags & NFE_TX_VALID)
2278			break;
2279
2280		prog++;
2281		sc->txq.queued--;
2282		data = &sc->txq.data[cons];
2283
2284		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2285			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2286				continue;
2287			if ((flags & NFE_TX_ERROR_V1) != 0) {
2288				device_printf(sc->nfe_dev,
2289				    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2290
2291				ifp->if_oerrors++;
2292			} else
2293				ifp->if_opackets++;
2294		} else {
2295			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2296				continue;
2297			if ((flags & NFE_TX_ERROR_V2) != 0) {
2298				device_printf(sc->nfe_dev,
2299				    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2300				ifp->if_oerrors++;
2301			} else
2302				ifp->if_opackets++;
2303		}
2304
2305		/* last fragment of the mbuf chain transmitted */
2306		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2307		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2308		    BUS_DMASYNC_POSTWRITE);
2309		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2310		m_freem(data->m);
2311		data->m = NULL;
2312	}
2313
2314	if (prog > 0) {
2315		sc->nfe_force_tx = 0;
2316		sc->txq.next = cons;
2317		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2318		if (sc->txq.queued == 0)
2319			sc->nfe_watchdog_timer = 0;
2320	}
2321}
2322
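
/*
 * Queue one mbuf chain for transmission.  The chain is DMA mapped
 * (collapsing it first if it has too many fragments) and one
 * descriptor is filled per segment.  Checksum, TSO and VLAN flags go
 * into the first descriptor only, and its NFE_TX_VALID bit is set
 * last so the chip never sees a partially built chain.
 */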
2323static int
2324nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2325{
2326	struct nfe_desc32 *desc32 = NULL;
2327	struct nfe_desc64 *desc64 = NULL;
2328	bus_dmamap_t map;
2329	bus_dma_segment_t segs[NFE_MAX_SCATTER];
2330	int error, i, nsegs, prod, si;
2331	uint32_t tso_segsz;
2332	uint16_t cflags, flags;
2333	struct mbuf *m;
2334
2335	prod = si = sc->txq.cur;
2336	map = sc->txq.data[prod].tx_data_map;
2337
2338	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2339	    &nsegs, BUS_DMA_NOWAIT);
2340	if (error == EFBIG) {
2341		m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
2342		if (m == NULL) {
2343			m_freem(*m_head);
2344			*m_head = NULL;
2345			return (ENOBUFS);
2346		}
2347		*m_head = m;
2348		error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2349		    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2350		if (error != 0) {
2351			m_freem(*m_head);
2352			*m_head = NULL;
2353			return (ENOBUFS);
2354		}
2355	} else if (error != 0)
2356		return (error);
2357	if (nsegs == 0) {
2358		m_freem(*m_head);
2359		*m_head = NULL;
2360		return (EIO);
2361	}
2362
2363	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2364		bus_dmamap_unload(sc->txq.tx_data_tag, map);
2365		return (ENOBUFS);
2366	}
2367
2368	m = *m_head;
2369	cflags = flags = 0;
2370	tso_segsz = 0;
2371	if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2372		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2373			cflags |= NFE_TX_IP_CSUM;
2374		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2375			cflags |= NFE_TX_TCP_UDP_CSUM;
2376		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2377			cflags |= NFE_TX_TCP_UDP_CSUM;
2378	}
2379	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2380		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2381		    NFE_TX_TSO_SHIFT;
2382		cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2383		cflags |= NFE_TX_TSO;
2384	}
2385
2386	for (i = 0; i < nsegs; i++) {
2387		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2388			desc64 = &sc->txq.desc64[prod];
2389			desc64->physaddr[0] =
2390			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
2391			desc64->physaddr[1] =
2392			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
2393			desc64->vtag = 0;
2394			desc64->length = htole16(segs[i].ds_len - 1);
2395			desc64->flags = htole16(flags);
2396		} else {
2397			desc32 = &sc->txq.desc32[prod];
2398			desc32->physaddr =
2399			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
2400			desc32->length = htole16(segs[i].ds_len - 1);
2401			desc32->flags = htole16(flags);
2402		}
2403
2404		/*
2405		 * Setting of the valid bit in the first descriptor is
2406		 * deferred until the whole chain is fully set up.
2407		 */
2408		flags |= NFE_TX_VALID;
2409
2410		sc->txq.queued++;
2411		NFE_INC(prod, NFE_TX_RING_COUNT);
2412	}
2413
2414	/*
2415	 * The whole mbuf chain has been DMA mapped; fix up the last and first
2416	 * descriptors.  Csum flags, vtag and TSO belong to the first fragment only.
2417	 */
2418	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2419		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2420		desc64 = &sc->txq.desc64[si];
2421		if ((m->m_flags & M_VLANTAG) != 0)
2422			desc64->vtag = htole32(NFE_TX_VTAG |
2423			    m->m_pkthdr.ether_vtag);
2424		if (tso_segsz != 0) {
2425			/*
2426			 * XXX
2427			 * The following indicates the descriptor element
2428			 * is a 32-bit quantity.
2429			 */
2430			desc64->length |= htole16((uint16_t)tso_segsz);
2431			desc64->flags |= htole16(tso_segsz >> 16);
2432		}
2433		/*
2434		 * finally, set the valid/checksum/TSO bit in the first
2435		 * descriptor.
2436		 */
2437		desc64->flags |= htole16(NFE_TX_VALID | cflags);
2438	} else {
2439		if (sc->nfe_flags & NFE_JUMBO_SUP)
2440			desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2441		else
2442			desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2443		desc32 = &sc->txq.desc32[si];
2444		if (tso_segsz != 0) {
2445			/*
2446			 * XXX
2447			 * The following indicates the descriptor element
2448			 * is a 32-bit quantity.
2449			 */
2450			desc32->length |= htole16((uint16_t)tso_segsz);
2451			desc32->flags |= htole16(tso_segsz >> 16);
2452		}
2453		/*
2454		 * finally, set the valid/checksum/TSO bit in the first
2455		 * descriptor.
2456		 */
2457		desc32->flags |= htole16(NFE_TX_VALID | cflags);
2458	}
2459
2460	sc->txq.cur = prod;
2461	prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2462	sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2463	sc->txq.data[prod].tx_data_map = map;
2464	sc->txq.data[prod].m = m;
2465
2466	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
2467
2468	return (0);
2469}
2470
2471
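/*
 * Program the hardware Rx filter.  The multicast filter consists of an
 * address register holding the bits common to all enabled multicast
 * addresses and a mask register marking the bit positions on which
 * they all agree; ALLMULTI/PROMISC zeroes both so that everything
 * matches.
 */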
2472static void
2473nfe_setmulti(struct nfe_softc *sc)
2474{
2475	struct ifnet *ifp = sc->nfe_ifp;
2476	struct ifmultiaddr *ifma;
2477	int i;
2478	uint32_t filter;
2479	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
2480	uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2481		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2482	};
2483
2484	NFE_LOCK_ASSERT(sc);
2485
2486	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2487		bzero(addr, ETHER_ADDR_LEN);
2488		bzero(mask, ETHER_ADDR_LEN);
2489		goto done;
2490	}
2491
2492	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
2493	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
2494
2495	IF_ADDR_LOCK(ifp);
2496	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2497		u_char *addrp;
2498
2499		if (ifma->ifma_addr->sa_family != AF_LINK)
2500			continue;
2501
2502		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2503		for (i = 0; i < ETHER_ADDR_LEN; i++) {
2504			u_int8_t mcaddr = addrp[i];
2505			addr[i] &= mcaddr;
2506			mask[i] &= ~mcaddr;
2507		}
2508	}
2509	IF_ADDR_UNLOCK(ifp);
2510
2511	for (i = 0; i < ETHER_ADDR_LEN; i++) {
2512		mask[i] |= addr[i];
2513	}
2514
2515done:
2516	addr[0] |= 0x01;	/* make sure multicast bit is set */
2517
2518	NFE_WRITE(sc, NFE_MULTIADDR_HI,
2519	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2520	NFE_WRITE(sc, NFE_MULTIADDR_LO,
2521	    addr[5] <<  8 | addr[4]);
2522	NFE_WRITE(sc, NFE_MULTIMASK_HI,
2523	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
2524	NFE_WRITE(sc, NFE_MULTIMASK_LO,
2525	    mask[5] <<  8 | mask[4]);
2526
2527	filter = NFE_READ(sc, NFE_RXFILTER);
2528	filter &= NFE_PFF_RX_PAUSE;
2529	filter |= NFE_RXFILTER_MAGIC;
2530	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2531	NFE_WRITE(sc, NFE_RXFILTER, filter);
2532}
2533
2534
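/* Taskqueue wrapper that restarts transmission via nfe_start(). */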
2535static void
2536nfe_tx_task(void *arg, int pending)
2537{
2538	struct ifnet *ifp;
2539
2540	ifp = (struct ifnet *)arg;
2541	nfe_start(ifp);
2542}
2543
2544
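/*
 * if_start handler.  Dequeue packets from the interface send queue and
 * hand them to nfe_encap() until the queue empties or the Tx ring
 * fills up, then kick the transmitter and arm the Tx watchdog.
 */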
2545static void
2546nfe_start(struct ifnet *ifp)
2547{
2548	struct nfe_softc *sc = ifp->if_softc;
2549	struct mbuf *m0;
2550	int enq;
2551
2552	NFE_LOCK(sc);
2553
2554	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2555	    IFF_DRV_RUNNING || sc->nfe_link == 0) {
2556		NFE_UNLOCK(sc);
2557		return;
2558	}
2559
2560	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
2561		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2562		if (m0 == NULL)
2563			break;
2564
2565		if (nfe_encap(sc, &m0) != 0) {
2566			if (m0 == NULL)
2567				break;
2568			IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2569			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2570			break;
2571		}
2572		enq++;
2573		ETHER_BPF_MTAP(ifp, m0);
2574	}
2575
2576	if (enq > 0) {
2577		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2578		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2579
2580		/* kick Tx */
2581		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2582
2583		/*
2584		 * Set a timeout in case the chip goes out to lunch.
2585		 */
2586		sc->nfe_watchdog_timer = 5;
2587	}
2588
2589	NFE_UNLOCK(sc);
2590}
2591
2592
2593static void
2594nfe_watchdog(struct ifnet *ifp)
2595{
2596	struct nfe_softc *sc = ifp->if_softc;
2597
2598	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2599		return;
2600
2601	/* Check if we've lost Tx completion interrupt. */
2602	nfe_txeof(sc);
2603	if (sc->txq.queued == 0) {
2604		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2605		    "-- recovering\n");
2606		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2607			taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
2608		return;
2609	}
2610	/* Check if we've lost start Tx command. */
2611	sc->nfe_force_tx++;
2612	if (sc->nfe_force_tx <= 3) {
2613		/*
2614		 * If a lost kick turns out to be the cause of these watchdog
2615		 * timeouts, the following code should move into nfe_txeof().
2616		 */
2617		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2618		return;
2619	}
2620	sc->nfe_force_tx = 0;
2621
2622	if_printf(ifp, "watchdog timeout\n");
2623
2624	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2625	ifp->if_oerrors++;
2626	nfe_init_locked(sc);
2627}
2628
2629
2630static void
2631nfe_init(void *xsc)
2632{
2633	struct nfe_softc *sc = xsc;
2634
2635	NFE_LOCK(sc);
2636	nfe_init_locked(sc);
2637	NFE_UNLOCK(sc);
2638}
2639
2640
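/*
 * Bring the interface up: initialize the Tx ring and either the
 * standard or the jumbo Rx ring depending on the MTU, program the MAC
 * address, ring addresses and Rx/Tx control bits for this chip
 * generation, configure interrupt moderation, enable Rx/Tx and start
 * media selection.
 */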
2641static void
2642nfe_init_locked(void *xsc)
2643{
2644	struct nfe_softc *sc = xsc;
2645	struct ifnet *ifp = sc->nfe_ifp;
2646	struct mii_data *mii;
2647	uint32_t val;
2648	int error;
2649
2650	NFE_LOCK_ASSERT(sc);
2651
2652	mii = device_get_softc(sc->nfe_miibus);
2653
2654	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2655		return;
2656
2657	nfe_stop(ifp);
2658
2659	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
2660
2661	nfe_init_tx_ring(sc, &sc->txq);
2662	if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2663		error = nfe_init_jrx_ring(sc, &sc->jrxq);
2664	else
2665		error = nfe_init_rx_ring(sc, &sc->rxq);
2666	if (error != 0) {
2667		device_printf(sc->nfe_dev,
2668		    "initialization failed: no memory for rx buffers\n");
2669		nfe_stop(ifp);
2670		return;
2671	}
2672
2673	val = 0;
2674	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2675		val |= NFE_MAC_ADDR_INORDER;
2676	NFE_WRITE(sc, NFE_TX_UNK, val);
2677	NFE_WRITE(sc, NFE_STATUS, 0);
2678
2679	if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2680		NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2681
2682	sc->rxtxctl = NFE_RXTX_BIT2;
2683	if (sc->nfe_flags & NFE_40BIT_ADDR)
2684		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2685	else if (sc->nfe_flags & NFE_JUMBO_SUP)
2686		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2687
2688	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2689		sc->rxtxctl |= NFE_RXTX_RXCSUM;
2690	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2691		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2692
2693	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2694	DELAY(10);
2695	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2696
2697	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2698		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2699	else
2700		NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2701
2702	NFE_WRITE(sc, NFE_SETUP_R6, 0);
2703
2704	/* set MAC address */
2705	nfe_set_macaddr(sc, IF_LLADDR(ifp));
2706
2707	/* tell MAC where rings are in memory */
2708	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2709		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2710		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
2711		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2712		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
2713	} else {
2714		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2715		    NFE_ADDR_HI(sc->rxq.physaddr));
2716		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2717		    NFE_ADDR_LO(sc->rxq.physaddr));
2718	}
2719	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2720	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
2721
2722	NFE_WRITE(sc, NFE_RING_SIZE,
2723	    (NFE_RX_RING_COUNT - 1) << 16 |
2724	    (NFE_TX_RING_COUNT - 1));
2725
2726	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2727
2728	/* force MAC to wakeup */
2729	val = NFE_READ(sc, NFE_PWR_STATE);
2730	if ((val & NFE_PWR_WAKEUP) == 0)
2731		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2732	DELAY(10);
2733	val = NFE_READ(sc, NFE_PWR_STATE);
2734	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2735
2736#if 1
2737	/* configure interrupts coalescing/mitigation */
2738	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2739#else
2740	/* no interrupt mitigation: one interrupt per packet */
2741	NFE_WRITE(sc, NFE_IMTIMER, 970);
2742#endif
2743
2744	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2745	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2746	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2747
2748	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
2749	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2750
2751	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2752	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
2753
2754	sc->rxtxctl &= ~NFE_RXTX_BIT2;
2755	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2756	DELAY(10);
2757	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2758
2759	/* set Rx filter */
2760	nfe_setmulti(sc);
2761
2762	/* enable Rx */
2763	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2764
2765	/* enable Tx */
2766	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2767
2768	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2769
2770#ifdef DEVICE_POLLING
2771	if (ifp->if_capenable & IFCAP_POLLING)
2772		nfe_disable_intr(sc);
2773	else
2774#endif
2775	nfe_set_intr(sc);
2776	nfe_enable_intr(sc); /* enable interrupts */
2777
2778	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2779	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2780
2781	sc->nfe_link = 0;
2782	mii_mediachg(mii);
2783
2784	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2785}
2786
2787
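/*
 * Stop the interface: halt Tx and Rx, mask interrupts, stop the tick
 * callout and free any mbufs still attached to the Rx, jumbo Rx and
 * Tx rings.
 */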
2788static void
2789nfe_stop(struct ifnet *ifp)
2790{
2791	struct nfe_softc *sc = ifp->if_softc;
2792	struct nfe_rx_ring *rx_ring;
2793	struct nfe_jrx_ring *jrx_ring;
2794	struct nfe_tx_ring *tx_ring;
2795	struct nfe_rx_data *rdata;
2796	struct nfe_tx_data *tdata;
2797	int i;
2798
2799	NFE_LOCK_ASSERT(sc);
2800
2801	sc->nfe_watchdog_timer = 0;
2802	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2803
2804	callout_stop(&sc->nfe_stat_ch);
2805
2806	/* abort Tx */
2807	NFE_WRITE(sc, NFE_TX_CTL, 0);
2808
2809	/* disable Rx */
2810	NFE_WRITE(sc, NFE_RX_CTL, 0);
2811
2812	/* disable interrupts */
2813	nfe_disable_intr(sc);
2814
2815	sc->nfe_link = 0;
2816
2817	/* free Rx and Tx mbufs still in the queues. */
2818	rx_ring = &sc->rxq;
2819	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2820		rdata = &rx_ring->data[i];
2821		if (rdata->m != NULL) {
2822			bus_dmamap_sync(rx_ring->rx_data_tag,
2823			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2824			bus_dmamap_unload(rx_ring->rx_data_tag,
2825			    rdata->rx_data_map);
2826			m_freem(rdata->m);
2827			rdata->m = NULL;
2828		}
2829	}
2830
2831	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2832		jrx_ring = &sc->jrxq;
2833		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2834			rdata = &jrx_ring->jdata[i];
2835			if (rdata->m != NULL) {
2836				bus_dmamap_sync(jrx_ring->jrx_data_tag,
2837				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2838				bus_dmamap_unload(jrx_ring->jrx_data_tag,
2839				    rdata->rx_data_map);
2840				m_freem(rdata->m);
2841				rdata->m = NULL;
2842			}
2843		}
2844	}
2845
2846	tx_ring = &sc->txq;
2847	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
2848		tdata = &tx_ring->data[i];
2849		if (tdata->m != NULL) {
2850			bus_dmamap_sync(tx_ring->tx_data_tag,
2851			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2852			bus_dmamap_unload(tx_ring->tx_data_tag,
2853			    tdata->tx_data_map);
2854			m_freem(tdata->m);
2855			tdata->m = NULL;
2856		}
2857	}
2858}
2859
2860
2861static int
2862nfe_ifmedia_upd(struct ifnet *ifp)
2863{
2864	struct nfe_softc *sc = ifp->if_softc;
2865	struct mii_data *mii;
2866
2867	NFE_LOCK(sc);
2868	mii = device_get_softc(sc->nfe_miibus);
2869	mii_mediachg(mii);
2870	NFE_UNLOCK(sc);
2871
2872	return (0);
2873}
2874
2875
2876static void
2877nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2878{
2879	struct nfe_softc *sc;
2880	struct mii_data *mii;
2881
2882	sc = ifp->if_softc;
2883
2884	NFE_LOCK(sc);
2885	mii = device_get_softc(sc->nfe_miibus);
2886	mii_pollstat(mii);
2887	NFE_UNLOCK(sc);
2888
2889	ifmr->ifm_active = mii->mii_media_active;
2890	ifmr->ifm_status = mii->mii_media_status;
2891}
2892
2893
2894void
2895nfe_tick(void *xsc)
2896{
2897	struct nfe_softc *sc;
2898	struct mii_data *mii;
2899	struct ifnet *ifp;
2900
2901	sc = (struct nfe_softc *)xsc;
2902
2903	NFE_LOCK_ASSERT(sc);
2904
2905	ifp = sc->nfe_ifp;
2906
2907	mii = device_get_softc(sc->nfe_miibus);
2908	mii_tick(mii);
2909	nfe_watchdog(ifp);
2910	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2911}
2912
2913
2914static int
2915nfe_shutdown(device_t dev)
2916{
2917	struct nfe_softc *sc;
2918	struct ifnet *ifp;
2919
2920	sc = device_get_softc(dev);
2921
2922	NFE_LOCK(sc);
2923	ifp = sc->nfe_ifp;
2924	nfe_stop(ifp);
2925	/* nfe_reset(sc); */
2926	NFE_UNLOCK(sc);
2927
2928	return (0);
2929}
2930
2931
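/*
 * Read the station address from the MAC address registers.  Chips
 * flagged NFE_CORRECT_MACADDR store the address in the opposite byte
 * order from the older parts, so the two cases read it mirrored.
 */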
2932static void
2933nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2934{
2935	uint32_t val;
2936
2937	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
2938		val = NFE_READ(sc, NFE_MACADDR_LO);
2939		addr[0] = (val >> 8) & 0xff;
2940		addr[1] = (val & 0xff);
2941
2942		val = NFE_READ(sc, NFE_MACADDR_HI);
2943		addr[2] = (val >> 24) & 0xff;
2944		addr[3] = (val >> 16) & 0xff;
2945		addr[4] = (val >>  8) & 0xff;
2946		addr[5] = (val & 0xff);
2947	} else {
2948		val = NFE_READ(sc, NFE_MACADDR_LO);
2949		addr[5] = (val >> 8) & 0xff;
2950		addr[4] = (val & 0xff);
2951
2952		val = NFE_READ(sc, NFE_MACADDR_HI);
2953		addr[3] = (val >> 24) & 0xff;
2954		addr[2] = (val >> 16) & 0xff;
2955		addr[1] = (val >>  8) & 0xff;
2956		addr[0] = (val & 0xff);
2957	}
2958}
2959
2960
2961static void
2962nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
2963{
2964
2965	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
2966	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2967	    addr[1] << 8 | addr[0]);
2968}
2969
2970
2971/*
2972 * Map a single buffer address.
2973 */
2974
2975static void
2976nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2977{
2978	struct nfe_dmamap_arg *ctx;
2979
2980	if (error != 0)
2981		return;
2982
2983	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2984
2985	ctx = (struct nfe_dmamap_arg *)arg;
2986	ctx->nfe_busaddr = segs[0].ds_addr;
2987}
2988
2989
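/*
 * Sysctl helper: accept a new integer value only if it lies within
 * [low, high], otherwise return EINVAL.
 */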
2990static int
2991sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2992{
2993	int error, value;
2994
2995	if (!arg1)
2996		return (EINVAL);
2997	value = *(int *)arg1;
2998	error = sysctl_handle_int(oidp, &value, 0, req);
2999	if (error || !req->newptr)
3000		return (error);
3001	if (value < low || value > high)
3002		return (EINVAL);
3003	*(int *)arg1 = value;
3004
3005	return (0);
3006}
3007
3008
3009static int
3010sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3011{
3012
3013	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
3014	    NFE_PROC_MAX));
3015}
3016